 -rw-r--r--  scraper/db_paper_doi.csv                            | 1
 -rw-r--r--  scraper/db_paper_pdf.csv                            | 3
 -rw-r--r--  scraper/db_paper_pdf_list.csv                       | 4
 -rw-r--r--  scraper/reports/doi_domains.html                    | 2
 -rw-r--r--  scraper/reports/doi_institutions_unattributed.csv   | 1
 -rw-r--r--  scraper/reports/paper_title_report.html             | 2
 -rw-r--r--  scraper/reports/paper_title_report_no_location.html | 2
 -rw-r--r--  scraper/reports/paper_title_report_nonmatching.html | 2
 -rw-r--r--  site/datasets/citations/brainwash.json              | 2
 -rw-r--r--  site/datasets/final/brainwash.json                  | 2
 -rw-r--r--  site/datasets/unknown/brainwash.json                | 2
 -rw-r--r--  site/datasets/verified/brainwash.json               | 2
 12 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/scraper/db_paper_doi.csv b/scraper/db_paper_doi.csv
index 98bf2e2d..dc94a7da 100644
--- a/scraper/db_paper_doi.csv
+++ b/scraper/db_paper_doi.csv
@@ -17513,6 +17513,7 @@ e69152334ac7bbb29d368862cf5ade75ed9bbadb,http://ieeexplore.ieee.org/stamp/stamp.
9f44ab28e5d4b7b9b63908e912582d20e0f1fa29,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4449561
9f4f890f74ac91bdc4323e061502331945474b90,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5929863
9f094341bea610a10346f072bf865cb550a1f1c1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5403087
+9f25b9efd09cc08c1ae14f301c90b903614968d8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575768
9f42dd958afc027d42843945623d8e0ddf4185d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410075
9f182f2f1ed71e57cf56ef161e05ca3e8816fecc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418354
9fc17fa5708584fa848164461f82a69e97f6ed69,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8331118
diff --git a/scraper/db_paper_pdf.csv b/scraper/db_paper_pdf.csv
index 7f4c04ee..aa56befb 100644
--- a/scraper/db_paper_pdf.csv
+++ b/scraper/db_paper_pdf.csv
@@ -1340,6 +1340,7 @@ d590ca357910532cc62eeacc56af8f86b9fe642b,https://pdfs.semanticscholar.org/d590/c
35800a537017803dd08274710388734db66b54f0,https://arxiv.org/pdf/1706.02631.pdf
35d8f8449fe98f357c471f2d5ef3b2e2f4154f96,https://arxiv.org/pdf/1606.04856.pdf
3555d849b85e9416e9496c9976084b0e692b63cd,https://pdfs.semanticscholar.org/3555/d849b85e9416e9496c9976084b0e692b63cd.pdf
+35259b3802bf27b7650d301ad34c26f3a8794d88,https://pdfs.semanticscholar.org/3525/9b3802bf27b7650d301ad34c26f3a8794d88.pdf
3533a7714b19396bba8297e0ca22f85ac68ca18a,https://arxiv.org/pdf/1611.06949.pdf
35b9af6057801fb2f28881840c8427c9cf648757,https://arxiv.org/pdf/1707.02785.pdf
35fe83665c61adb513781c7208b92706ae2a1578,https://arxiv.org/pdf/1809.03707.pdf
@@ -6540,6 +6541,7 @@ f03180b87b9b595d48c61b5cf4752fbf6891c452,https://arxiv.org/pdf/1812.02771.pdf
f04826cd9f06fa7fa4a3dde88f97f1d4f6457ad7,https://pdfs.semanticscholar.org/f048/26cd9f06fa7fa4a3dde88f97f1d4f6457ad7.pdf
f02a016bfbab5ef6357f34f8a550a5e569dace6b,https://pdfs.semanticscholar.org/eb18/b4658e09520603388f519b99c99438a92ee7.pdf
f0ae807627f81acb63eb5837c75a1e895a92c376,https://pdfs.semanticscholar.org/f0ae/807627f81acb63eb5837c75a1e895a92c376.pdf
+f038758e85c9ee6fee68a4f3992d0303b5c93efd,https://arxiv.org/pdf/1812.04948.pdf
f06f3e1cef2d04af915a932e83b22e46a45f3b73,https://pdfs.semanticscholar.org/f06f/3e1cef2d04af915a932e83b22e46a45f3b73.pdf
f08266cea120e8aa091983da5269ee5e35febe75,https://arxiv.org/pdf/1511.06704.pdf
f06b015bb19bd3c39ac5b1e4320566f8d83a0c84,http://mplab.ucsd.edu/~ksikka/pain_journal.pdf
@@ -8730,6 +8732,7 @@ fd6c144e200c39cb7209d7dc448f3b8c4d20d573,https://pdfs.semanticscholar.org/fd6c/1
683f5c838ea2c9c50f3f5c5fa064c00868751733,https://pdfs.semanticscholar.org/683f/5c838ea2c9c50f3f5c5fa064c00868751733.pdf
68a513b1ac290a69f95869600c751dca25e9495b,https://arxiv.org/pdf/1811.07789.pdf
68ba19afe924699b4a0c84af91c05deb5b03e3bd,https://pdfs.semanticscholar.org/68ba/19afe924699b4a0c84af91c05deb5b03e3bd.pdf
+68ea88440fc48d59c7407e71a193ff1973f9ba7c,https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf
680d5038da3b7528fb14032915324ab4a665337c,https://pdfs.semanticscholar.org/f261/041b944a6c29f0c6bf36850321b93d20d64d.pdf
68ce1572b18c95fe9c60bc11d9d33f8310902154,https://pdfs.semanticscholar.org/d849/25e6b5e2bb88df226ffb267654d74d5a9115.pdf
68111cbb1441bbeaa6303c2a3c36895fec5e8ab9,http://webdiis.unizar.es/~jcivera/papers/pire_etal_ras17.pdf
diff --git a/scraper/db_paper_pdf_list.csv b/scraper/db_paper_pdf_list.csv
index 43417157..15e139bc 100644
--- a/scraper/db_paper_pdf_list.csv
+++ b/scraper/db_paper_pdf_list.csv
@@ -2627,6 +2627,7 @@ d5909f8d82bff4b86cc36ecd000f251c1a76293b,,,http://dl.acm.org/citation.cfm?id=320
35d8f8449fe98f357c471f2d5ef3b2e2f4154f96,https://arxiv.org/pdf/1606.04856.pdf,,,
3555d849b85e9416e9496c9976084b0e692b63cd,https://pdfs.semanticscholar.org/3555/d849b85e9416e9496c9976084b0e692b63cd.pdf,,,
35bcae6b3843a65eb60af2e4051ea1357fd697a6,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8578160,
+35259b3802bf27b7650d301ad34c26f3a8794d88,https://pdfs.semanticscholar.org/3525/9b3802bf27b7650d301ad34c26f3a8794d88.pdf,,,
3507544e326253c47e4896919cccafe97a0c2661,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451433,
3533a7714b19396bba8297e0ca22f85ac68ca18a,https://arxiv.org/pdf/1611.06949.pdf,,,
35b9af6057801fb2f28881840c8427c9cf648757,https://arxiv.org/pdf/1707.02785.pdf,,,
@@ -12904,6 +12905,7 @@ f0ad1d59fc4fd16e37f67bc0d82b4dbd7538512f,,,http://ieeexplore.ieee.org/stamp/stam
f0d7c205b2726ff30d7139498aaab3ca205f0e64,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5335067,
f0ae807627f81acb63eb5837c75a1e895a92c376,https://pdfs.semanticscholar.org/f0ae/807627f81acb63eb5837c75a1e895a92c376.pdf,,,
f0acbc128407606413df764cfea7350e8842c704,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8268085,
+f038758e85c9ee6fee68a4f3992d0303b5c93efd,https://arxiv.org/pdf/1812.04948.pdf,,,
f06f3e1cef2d04af915a932e83b22e46a45f3b73,https://pdfs.semanticscholar.org/f06f/3e1cef2d04af915a932e83b22e46a45f3b73.pdf,,,
f08266cea120e8aa091983da5269ee5e35febe75,https://arxiv.org/pdf/1511.06704.pdf,,,
f06b015bb19bd3c39ac5b1e4320566f8d83a0c84,http://mplab.ucsd.edu/~ksikka/pain_journal.pdf,,,
@@ -17218,6 +17220,7 @@ fd6c144e200c39cb7209d7dc448f3b8c4d20d573,https://pdfs.semanticscholar.org/fd6c/1
68a513b1ac290a69f95869600c751dca25e9495b,https://arxiv.org/pdf/1811.07789.pdf,,,
687fc78a2e73a21aa0aae29d1499079caaac938c,,,,
68ba19afe924699b4a0c84af91c05deb5b03e3bd,https://pdfs.semanticscholar.org/68ba/19afe924699b4a0c84af91c05deb5b03e3bd.pdf,,,
+68ea88440fc48d59c7407e71a193ff1973f9ba7c,https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf,,,
6813208b94ffa1052760d318169307d1d1c2438e,,,http://dl.acm.org/citation.cfm?id=2830582,
6856a11b98ffffeff6e2f991d3d1a1232c029ea1,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771409,
68e6cfb0d7423d3fae579919046639c8e2d04ad7,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7550058,
@@ -43138,6 +43141,7 @@ e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec,https://pdfs.semanticscholar.org/e686/5
9fb7a23910f6464902f1b653025f3aeaa20b90dd,https://arxiv.org/pdf/1707.09605.pdf,,,
9ff973fe3f2541712b7a5f413ad0c32b03e710a4,https://pdfs.semanticscholar.org/9ff9/73fe3f2541712b7a5f413ad0c32b03e710a4.pdf,,,
9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd,https://pdfs.semanticscholar.org/9fdf/e1695adac2380f99d3d5cb6879f0ac7f2bfd.pdf,,,
+9f25b9efd09cc08c1ae14f301c90b903614968d8,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575768,
9f42dd958afc027d42843945623d8e0ddf4185d1,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410075,
9f182f2f1ed71e57cf56ef161e05ca3e8816fecc,,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418354,
9fc5c067559b705c681bf5e344a0b780b4d87412,http://downloads.hindawi.com/journals/cin/2018/7208794.pdf,,,
diff --git a/scraper/reports/doi_domains.html b/scraper/reports/doi_domains.html
index 634a27ae..c9d692d5 100644
--- a/scraper/reports/doi_domains.html
+++ b/scraper/reports/doi_domains.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>DOI Domains</title><link rel='stylesheet' href='reports.css'></head><body><h2>DOI Domains</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>ieeexplore.ieee.org</td><td>12143</td></tr><tr><td>link.springer.com</td><td>1641</td></tr><tr><td>dl.acm.org</td><td>1493</td></tr><tr><td>linkinghub.elsevier.com</td><td>833</td></tr><tr><td>www.computer.org</td><td>825</td></tr><tr><td>www.ncbi.nlm.nih.gov</td><td>172</td></tr><tr><td>www.sciencedirect.com</td><td>140</td></tr><tr><td>www.worldscientific.com</td><td>54</td></tr><tr><td>www.spiedigitallibrary.org</td><td>44</td></tr><tr><td>www.nature.com</td><td>37</td></tr><tr><td>www.scitepress.org</td><td>25</td></tr><tr><td>www.crossref.org</td><td>24</td></tr><tr><td>onlinelibrary.wiley.com</td><td>23</td></tr><tr><td>www.mitpressjournals.org</td><td>20</td></tr><tr><td>mr.crossref.org</td><td>17</td></tr><tr><td>www.inderscience.com</td><td>16</td></tr><tr><td>arxiv.org</td><td>11</td></tr><tr><td>jivp-eurasipjournals.springeropen.com</td><td>9</td></tr><tr><td>www.isca-speech.org</td><td>8</td></tr><tr><td>www.tandfonline.com</td><td>8</td></tr><tr><td>www.hindawi.com</td><td>6</td></tr><tr><td>jneurodevdisorders.biomedcentral.com</td><td>6</td></tr><tr><td>www.emeraldinsight.com</td><td>5</td></tr><tr><td>content.iospress.com:443</td><td>4</td></tr><tr><td>academic.oup.com</td><td>4</td></tr><tr><td>www.intechopen.com</td><td>4</td></tr><tr><td>infoscience.epfl.ch</td><td>3</td></tr><tr><td>www.bmva.org</td><td>3</td></tr><tr><td>www.base-search.net</td><td>3</td></tr><tr><td>epubs.siam.org</td><td>3</td></tr><tr><td>bmcbioinformatics.biomedcentral.com</td><td>3</td></tr><tr><td>journals.sagepub.com</td><td>2</td></tr><tr><td>tel.archives-ouvertes.fr</td><td>2</td></tr><tr><td>portal.dnb.de</td><td>2</td></tr><tr><td>ethos.bl.uk</td><td>2</td></tr><tr><td>journalofbigdata.springeropen.com</td><td>2</td></tr><tr><td>www.jstage.jst.go.jp</td><td>2</td></tr><tr><td>aclanthology.info</td><td>2</td></tr><tr><td>ora.ox.ac.uk</td><td>2</td></tr><tr><td>autosoftjournal.net</td><td>2</td></tr><tr><td>ipsjcva.springeropen.com</td><td>2</td></tr><tr><td>journal-bcs.springeropen.com</td><td>2</td></tr><tr><td>www.informatica.si</td><td>2</td></tr><tr><td>www.matec-conferences.org</td><td>1</td></tr><tr><td>dpi-proceedings.com</td><td>1</td></tr><tr><td>digital-library.theiet.org</td><td>1</td></tr><tr><td>www.usenix.org</td><td>1</td></tr><tr><td>jphysiolanthropol.biomedcentral.com</td><td>1</td></tr><tr><td>www.liebertpub.com</td><td>1</td></tr><tr><td>spiral.imperial.ac.uk:8443</td><td>1</td></tr><tr><td>annals-csis.org</td><td>1</td></tr><tr><td>www.research-collection.ethz.ch</td><td>1</td></tr><tr><td>www.int-arch-photogramm-remote-sens-spatial-inf-sci.net</td><td>1</td></tr><tr><td>bmcgenomics.biomedcentral.com</td><td>1</td></tr><tr><td>ecommons.udayton.edu</td><td>1</td></tr><tr><td>eprints.soton.ac.uk</td><td>1</td></tr><tr><td>www.teses.usp.br</td><td>1</td></tr><tr><td>www.aast.edu</td><td>1</td></tr><tr><td>ir.library.louisville.edu</td><td>1</td></tr><tr><td>www.mecs-press.org</td><td>1</td></tr><tr><td>jes-eurasipjournals.springeropen.com</td><td>1</td></tr><tr><td>www.lrec-conf.org</td><td>1</td></tr><tr><td>www.ijeee.net</td><td>1</td></tr><tr><td>asp-eurasipjournals.spri
ngeropen.com</td><td>1</td></tr><tr><td>www.aaai.org</td><td>1</td></tr><tr><td>journals.tubitak.gov.tr</td><td>1</td></tr><tr><td>cit.fer.hr</td><td>1</td></tr><tr><td>www.morganclaypool.com</td><td>1</td></tr><tr><td>koreascience.or.kr</td><td>1</td></tr><tr><td>www.degruyter.com</td><td>1</td></tr><tr><td>www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net</td><td>1</td></tr><tr><td>www.mdpi.com</td><td>1</td></tr><tr><td>bmcneurosci.biomedcentral.com</td><td>1</td></tr><tr><td>www.repository.cam.ac.uk</td><td>1</td></tr><tr><td>ro.uow.edu.au</td><td>1</td></tr><tr><td>www.research.lancs.ac.uk</td><td>1</td></tr><tr><td>www.ijcai.org</td><td>1</td></tr><tr><td>hcis-journal.springeropen.com</td><td>1</td></tr><tr><td>digitalcommons.unl.edu</td><td>1</td></tr><tr><td>www.atlantis-press.com</td><td>1</td></tr></table></body></html>
\ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>DOI Domains</title><link rel='stylesheet' href='reports.css'></head><body><h2>DOI Domains</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>ieeexplore.ieee.org</td><td>12144</td></tr><tr><td>link.springer.com</td><td>1641</td></tr><tr><td>dl.acm.org</td><td>1493</td></tr><tr><td>linkinghub.elsevier.com</td><td>833</td></tr><tr><td>www.computer.org</td><td>825</td></tr><tr><td>www.ncbi.nlm.nih.gov</td><td>172</td></tr><tr><td>www.sciencedirect.com</td><td>140</td></tr><tr><td>www.worldscientific.com</td><td>54</td></tr><tr><td>www.spiedigitallibrary.org</td><td>44</td></tr><tr><td>www.nature.com</td><td>37</td></tr><tr><td>www.scitepress.org</td><td>25</td></tr><tr><td>www.crossref.org</td><td>24</td></tr><tr><td>onlinelibrary.wiley.com</td><td>23</td></tr><tr><td>www.mitpressjournals.org</td><td>20</td></tr><tr><td>mr.crossref.org</td><td>17</td></tr><tr><td>www.inderscience.com</td><td>16</td></tr><tr><td>arxiv.org</td><td>11</td></tr><tr><td>jivp-eurasipjournals.springeropen.com</td><td>9</td></tr><tr><td>www.isca-speech.org</td><td>8</td></tr><tr><td>www.tandfonline.com</td><td>8</td></tr><tr><td>www.hindawi.com</td><td>6</td></tr><tr><td>jneurodevdisorders.biomedcentral.com</td><td>6</td></tr><tr><td>www.emeraldinsight.com</td><td>5</td></tr><tr><td>content.iospress.com:443</td><td>4</td></tr><tr><td>academic.oup.com</td><td>4</td></tr><tr><td>www.intechopen.com</td><td>4</td></tr><tr><td>infoscience.epfl.ch</td><td>3</td></tr><tr><td>www.bmva.org</td><td>3</td></tr><tr><td>www.base-search.net</td><td>3</td></tr><tr><td>epubs.siam.org</td><td>3</td></tr><tr><td>bmcbioinformatics.biomedcentral.com</td><td>3</td></tr><tr><td>journals.sagepub.com</td><td>2</td></tr><tr><td>tel.archives-ouvertes.fr</td><td>2</td></tr><tr><td>portal.dnb.de</td><td>2</td></tr><tr><td>ethos.bl.uk</td><td>2</td></tr><tr><td>journalofbigdata.springeropen.com</td><td>2</td></tr><tr><td>www.jstage.jst.go.jp</td><td>2</td></tr><tr><td>aclanthology.info</td><td>2</td></tr><tr><td>ora.ox.ac.uk</td><td>2</td></tr><tr><td>autosoftjournal.net</td><td>2</td></tr><tr><td>ipsjcva.springeropen.com</td><td>2</td></tr><tr><td>journal-bcs.springeropen.com</td><td>2</td></tr><tr><td>www.informatica.si</td><td>2</td></tr><tr><td>www.matec-conferences.org</td><td>1</td></tr><tr><td>dpi-proceedings.com</td><td>1</td></tr><tr><td>digital-library.theiet.org</td><td>1</td></tr><tr><td>www.usenix.org</td><td>1</td></tr><tr><td>jphysiolanthropol.biomedcentral.com</td><td>1</td></tr><tr><td>www.liebertpub.com</td><td>1</td></tr><tr><td>spiral.imperial.ac.uk:8443</td><td>1</td></tr><tr><td>annals-csis.org</td><td>1</td></tr><tr><td>www.research-collection.ethz.ch</td><td>1</td></tr><tr><td>www.int-arch-photogramm-remote-sens-spatial-inf-sci.net</td><td>1</td></tr><tr><td>bmcgenomics.biomedcentral.com</td><td>1</td></tr><tr><td>ecommons.udayton.edu</td><td>1</td></tr><tr><td>eprints.soton.ac.uk</td><td>1</td></tr><tr><td>www.teses.usp.br</td><td>1</td></tr><tr><td>www.aast.edu</td><td>1</td></tr><tr><td>ir.library.louisville.edu</td><td>1</td></tr><tr><td>www.mecs-press.org</td><td>1</td></tr><tr><td>jes-eurasipjournals.springeropen.com</td><td>1</td></tr><tr><td>www.lrec-conf.org</td><td>1</td></tr><tr><td>www.ijeee.net</td><td>1</td></tr><tr><td>asp-eurasipjournals.springeropen.com</td><td>1</td></tr><tr><td>www.aaai.org</td><td>1</td></tr><tr><td>journals.tubitak.gov.tr</td><td>1</td></tr><tr><td>cit.fer.hr</td><td>1</td></tr><tr><td>www.mo
rganclaypool.com</td><td>1</td></tr><tr><td>koreascience.or.kr</td><td>1</td></tr><tr><td>www.degruyter.com</td><td>1</td></tr><tr><td>www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net</td><td>1</td></tr><tr><td>www.mdpi.com</td><td>1</td></tr><tr><td>bmcneurosci.biomedcentral.com</td><td>1</td></tr><tr><td>www.repository.cam.ac.uk</td><td>1</td></tr><tr><td>ro.uow.edu.au</td><td>1</td></tr><tr><td>www.research.lancs.ac.uk</td><td>1</td></tr><tr><td>www.ijcai.org</td><td>1</td></tr><tr><td>hcis-journal.springeropen.com</td><td>1</td></tr><tr><td>digitalcommons.unl.edu</td><td>1</td></tr><tr><td>www.atlantis-press.com</td><td>1</td></tr></table></body></html>
\ No newline at end of file
diff --git a/scraper/reports/doi_institutions_unattributed.csv b/scraper/reports/doi_institutions_unattributed.csv
index 4ad88ade..775c3cf7 100644
--- a/scraper/reports/doi_institutions_unattributed.csv
+++ b/scraper/reports/doi_institutions_unattributed.csv
@@ -2471,6 +2471,7 @@ e68c133947bbf14834f5353126ae85cc048642db,A Memory Network Approach for Story-Bas
9f723469b6634a7d08d3b888e23421e873c95617,Generative Image Inpainting with Contextual Attention
9fd7d9e982fc2c0710a25f4df568d35262deda8c,A Unified Video Text Detection Method with Network Flow
9fab78015e6e91ba7241a923222acd6c576c6e27,Clothes Advertising by Targeting Principal Actors in Video
+9f25b9efd09cc08c1ae14f301c90b903614968d8,Deep People Detection: A Comparative Study of SSD and LSTM-decoder
9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f,Lighting Analysis and Texture Modification of 3D Human Face Scans
9f259c58034279e73af473f1f47d3ae3dadaf599,An SVM Based Skin Disease Identification Using Local Binary Patterns
a92c207031b0778572bf41803dba1a21076e128b,Unobtrusive Students' Engagement Analysis in Computer Science Laboratory Using Deep Learning Techniques
diff --git a/scraper/reports/paper_title_report.html b/scraper/reports/paper_title_report.html
index 1c979bc1..c9f728d2 100644
--- a/scraper/reports/paper_title_report.html
+++ b/scraper/reports/paper_title_report.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr></table></body></html>
\ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td></tr><tr><td>fpoq</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance"
target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance" target="_blank">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d818568838433a6d6831adde49a58cef05e0c89f</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=apparent and real age estimation in still images with deep residual regressors on appa-real database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>633c851ebf625ad7abdda2324e9de093cf623141</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>bpad</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute 
classification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>cacd</td><td></td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td><a href="https://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=cross-age reference coding for age-invariant face recognition and retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td></tr><tr><td>cafe</td><td>#N/A</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_crp</td><td>Caltech CRP</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained classification of pedestrians in video: benchmark and state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a 
benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td>California Institute of Technology</td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face 
recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression 
Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td>University of Pittsburgh</td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td>Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</td><td>Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding collective crowd behaviors: learning a mixture model of dynamic pedestrian-agents&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>ufi</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><span class="gray">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>1</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=1&sort=relevance" target="_blank">[s2]</a></td><td>University of Denver</td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unlabeled samples generated by gan improve the person re-identification baseline in vitro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Social Groups Within and Across Cameras</td><td>Tracking Social Groups Within and Across Cameras</td><td><a href="https://users.cs.duke.edu/~tomasi/papers/ristani/ristaniTCAS16.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=tracking social groups within and across cameras&sort=relevance" target="_blank">[s2]</a></td><td>Duke University</td><td>9e644b1e33dd9367be167eb9d832174004840400</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Multiple People Online and in Real Time</td><td>Tracking Multiple People Online and in Real Time</td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=tracking multiple people online and in real time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>erce</td><td>ERCe</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>erce</td><td>ERCe</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td>ETH Zurich</td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>72a155c987816ae81c858fddbd6beab656d86220</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td></td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts Dartmouth</td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>FDDB: A benchmark for face detection in unconstrained settings</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for 
principal components analysis and its application to face image analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>Multimodal 2D, 2.5D & 3D Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td>Universidad Rey Juan Carlos, Spain</td><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td>NIST</td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing cosegmentation for recognizing people</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>FACE2GPS: Estimating geographic location from facial features</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face2gps: estimating geographic location from facial features&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Large-scale geo-facial image analysis</td><td>Large-scale geo-facial image analysis</td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=large-scale geo-facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4af89578ac237278be310f7660a408b03f12d603</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Exploring the Geo-Dependence of Human Face Appearance</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring the geo-dependence of human face appearance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>gfw</td><td>Grouping Face in the Wild</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Generic object recognition with boosting</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td>TU Graz</td><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td></td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware 
CNNs for Person Head Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Wilmington</td><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td></tr><tr><td>ibm_dif</td><td>IBM Diversity in Faces</td><td>Diversity in Faces</td><td>Facial Coding Scheme Reference 1 Craniofacial Distances</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=diversity in faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database with age, pose and expression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td>Islamic Azad University</td><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a 
href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_face</td><td>IMDb Face</td><td>The Devil of Face Recognition is in the Noise</td><td>The Devil of Face Recognition is in the Noise</td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the devil of face recognition is in the noise&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td></tr><tr><td>imdb_wiki</td><td>IMDB-Wiki</td><td>Deep expectation of real and apparent age from a single image without facial 
landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB-Wiki</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td>BVBCET, Hubli, India</td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>The jiku mobile video dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" 
target="_blank">[s2]</a></td><td>National University of Singapore</td><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Genealogical face recognition based on UB KinFace database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Buffalo</td><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Chapel Hill</td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>laofiw</td><td>LAOFIW</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td><a 
href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=turning a blind eye: explicit removal of biases and variation from deep neural network embeddings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4eab317b5ac436a949849ed286baa3de2a541eef</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td>University of Leeds</td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfpw</td><td>LFPW</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled 
Faces in the Wild: A Survey</td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td>Laboratoire de Télécommunications et Télédétection, UCL, Louvain-La-Neuve, Belgium</td><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>XM2VTSDB : The extended M2VTS database</td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>mafa</td><td>MAsked FAces</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting masked faces in the wild with lle-cnns&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance" 
target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time 
dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td>University of Southern California</td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression 
Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a 
href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial Expression Database</td><td>The MUG facial expression database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>Aristotle University of Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>names_and_faces</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of Notre Dame</td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><span 
class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>pa_100k</td><td>PA-100K</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hydraplus-net: attentive deep features for pedestrian analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put 
face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td>Queen Mary University of London</td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Multi-camera activity correlation analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td>Queen Mary University of London</td><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person 
Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video 
ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a Large Sequence-Based Human Gait Database</td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database: discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td>Brown University</td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>oxford_town_centre</td><td>TownCentre</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and 
people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td>TU Darmstadt</td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td>TU Darmstadt</td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>uccs</td><td>UCCS</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained face detection and open-set face recognition challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3531332efe19be21e7401ba1f04570a142617236</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain 
expression archive database&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td>University of Trento</td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td>Kingston University</td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td>University of Delaware</td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td></td><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd 
behavior</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td>Open University of Israel</td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>voxceleb2</td><td>VoxCeleb2</td><td>VoxCeleb2: Deep Speaker Recognition</td><td>VoxCeleb2: Deep Speaker Recognition.</td><td><a href="https://pdfs.semanticscholar.org/8875/ae233bc074f5cd6c4ebba447b536a7e847a5.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=voxceleb2: deep speaker recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8875ae233bc074f5cd6c4ebba447b536a7e847a5</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td>University of Udine</td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? 
Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td></td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB : Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable 
Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_celebrities</td><td>YouTube Celebrities</td><td>Face Tracking and Recognition with Visual Constraints in Real-World Videos</td><td>Face tracking and recognition with visual constraints in real-world videos</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face tracking and recognition with visual constraints in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>Rutgers University</td><td>6204776d31359d129a582057c2d788a14f8aadeb</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia 
University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html>
\ No newline at end of file
diff --git a/scraper/reports/paper_title_report_no_location.html b/scraper/reports/paper_title_report_no_location.html index be1c5464..3258e760 100644 --- a/scraper/reports/paper_title_report_no_location.html +++ b/scraper/reports/paper_title_report_no_location.html @@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr></table></body></html>
\ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td></tr><tr><td>fpoq</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d818568838433a6d6831adde49a58cef05e0c89f</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, 
metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr>
<tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr>
<tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr>
<tr><td>appa_real</td><td>APPA-REAL</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=apparent and real age estimation in still images with deep residual regressors on appa-real database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>633c851ebf625ad7abdda2324e9de093cf623141</td></tr>
<tr><td>appa_real</td><td>APPA-REAL</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=from apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td></tr>
<tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr>
<tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr>
<tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr>
<tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr>
<tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr>
<tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr>
<tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr>
<tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr>
<tr><td>bpad</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr>
<tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr>
<tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr>
<tr><td>cacd</td><td></td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td><a href="https://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=cross-age reference coding for age-invariant face recognition and retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td></tr>
<tr><td>cafe</td><td>#N/A</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr>
<tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr>
<tr><td>caltech_crp</td><td>Caltech CRP</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained classification of pedestrians in video: benchmark and state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td></tr>
<tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr>
<tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr>
<tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr>
<tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr>
<tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr>
<tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr>
<tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr>
<tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr>
<tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr>
<tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr>
<tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr>
<tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr>
<tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr>
<tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr>
<tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr>
<tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr>
<tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr>
<tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr>
<tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr>
<tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr>
<tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr>
<tr><td>ufi</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr>
<tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr>
<tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr>
<tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr>
<tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr>
<tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr>
<tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr>
<tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr>
<tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr>
<tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unlabeled samples generated by gan improve the person re-identification baseline in vitro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td></tr>
<tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Multiple People Online and in Real Time</td><td>Tracking Multiple People Online and in Real Time</td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=tracking multiple people online and in real time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td></tr>
<tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr>
<tr><td>erce</td><td>ERCe</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr>
<tr><td>erce</td><td>ERCe</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr>
<tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>72a155c987816ae81c858fddbd6beab656d86220</td></tr>
<tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr>
<tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr>
<tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr>
<tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td></td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr>
<tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr>
<tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>FDDB: A benchmark for face detection in unconstrained settings</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr>
<tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr>
<tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr>
<tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr>
<tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr>
<tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr>
<tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr>
<tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr>
<tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr>
<tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr>
<tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr>
<tr><td>geofaces</td><td>GeoFaces</td><td>FACE2GPS: Estimating geographic location from facial features</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face2gps: estimating geographic location from facial features&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr>
<tr><td>geofaces</td><td>GeoFaces</td><td>Large-scale geo-facial image analysis</td><td>Large-scale geo-facial image analysis</td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large-scale geo-facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4af89578ac237278be310f7660a408b03f12d603</td></tr>
<tr><td>geofaces</td><td>GeoFaces</td><td>Exploring the Geo-Dependence of Human Face Appearance</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring the geo-dependence of human face appearance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr>
<tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr>
<tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr>
<tr><td>gfw</td><td>Grouping Face in the Wild</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr>
<tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr>
<tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr>
<tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr>
<tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr>
<tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr>
<tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr>
<tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td></td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr>
<tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr>
<tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr>
<tr><td>ibm_dif</td><td>IBM Diversity in Faces</td><td>Diversity in Faces</td><td>Facial Coding Scheme Reference 1 Craniofacial Distances</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=diversity in faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td></tr>
<tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr>
<tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr>
<tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr>
<tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr>
<tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr>
<tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr>
<tr><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr>
<tr><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr>
<tr><td>imdb_face</td><td>IMDb Face</td><td>The Devil of Face Recognition is in the Noise</td><td>The Devil of Face Recognition is in the Noise</td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the devil of face recognition is in the noise&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td></tr>
<tr><td>imdb_wiki</td><td>IMDB-Wiki</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr>
<tr><td>imdb_wiki</td><td>IMDB-Wiki</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr>
<tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr>
<tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr>
<tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr>
<tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr>
<tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr>
<tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr>
<tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr>
<tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr>
<tr><td>laofiw</td><td>LAOFIW</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=turning a blind eye: explicit removal of biases and variation from deep neural network embeddings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4eab317b5ac436a949849ed286baa3de2a541eef</td></tr>
<tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr>
<tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr>
<tr><td>lfpw</td><td>LFPW</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr>
<tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr>
<tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr>
<tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild: A Survey</td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr>
<tr><td>lfw</td><td>LFW</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr>
<tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>XM2VTSDB : The extended M2VTS database</td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr>
<tr><td>mafa</td><td>MAsked FAces</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=detecting masked faces in the wild with lle-cnns&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td></tr>
<tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr>
<tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr>
<tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr>
<tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr>
<tr><td>market_1501</td><td>Market 1501</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr>
<tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr>
<tr><td>market_1501</td><td>Market 1501</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr>
<tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr>
<tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td></tr>
<tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td></tr>
<tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr>
<tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr>
<tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr>
<tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr>
<tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td></tr>
<tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr>
<tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr>
<tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr>
<tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr>
<tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr>
<tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr>
<tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr>
<tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr>
<tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr>
<tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr>
<tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark 
for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>names_and_faces</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>pa_100k</td><td>PA-100K</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hydraplus-net: attentive deep features for pedestrian analysis&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for 
Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent 
Re-identification in a Camera Network</td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and 
cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a Large Sequence-Based Human Gait Database</td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford 
Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>oxford_town_centre</td><td>TownCentre</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>uccs</td><td>UCCS</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained face detection and open-set face recognition challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3531332efe19be21e7401ba1f04570a142617236</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td></td><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>voxceleb2</td><td>VoxCeleb2</td><td>VoxCeleb2: Deep Speaker Recognition</td><td>VoxCeleb2: Deep Speaker Recognition.</td><td><a href="https://pdfs.semanticscholar.org/8875/ae233bc074f5cd6c4ebba447b536a7e847a5.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=voxceleb2: deep speaker recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8875ae233bc074f5cd6c4ebba447b536a7e847a5</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by 
fusing deep channels</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td></td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB : Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning 
detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html>
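One regularity worth noting before the next file: every pdfs.semanticscholar.org link in the rows above is derivable from the 40-character Semantic Scholar SHA in the s2 id column (the first four hex characters form the directory, the remaining 36 the filename). A minimal sketch of that mapping in Python follows; whether the scraper constructs these links this way, rather than storing them verbatim, is an assumption.

# Sketch only: reproduces the URL pattern visible in the rows above.
# Assumption: links are derived from the S2 paper SHA, not stored verbatim.
def s2_pdf_url(s2_id: str) -> str:
    """Map a 40-char Semantic Scholar paper SHA to its mirrored PDF URL."""
    s2_id = s2_id.strip().lower()
    if len(s2_id) != 40 or any(c not in "0123456789abcdef" for c in s2_id):
        raise ValueError(f"not a 40-char hex S2 id: {s2_id!r}")
    return f"https://pdfs.semanticscholar.org/{s2_id[:4]}/{s2_id[4:]}.pdf"

# Checked against the RaFD row above:
assert s2_pdf_url("3765df816dc5a061bc261e190acc8bdd9d47bec0") == \
    "https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf"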
\ No newline at end of file
diff --git a/scraper/reports/paper_title_report_nonmatching.html b/scraper/reports/paper_title_report_nonmatching.html
index 5889ec7c..7326a9f4 100644
--- a/scraper/reports/paper_title_report_nonmatching.html
+++ b/scraper/reports/paper_title_report_nonmatching.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th></table></body></html>
\ No newline at end of file +<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>disfa</td><td>DISFA</td><td>1</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=1&sort=relevance" target="_blank">[s2]</a></td><td>University of Denver</td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" 
target="_blank">[s2]</a></td><td>NIST</td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>FACE2GPS: Estimating geographic location from facial features</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face2gps: estimating geographic location from facial features&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>ibm_dif</td><td>IBM Diversity in Faces</td><td>Diversity in Faces</td><td>Facial Coding Scheme Reference 1 Craniofacial Distances</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=diversity in faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>XM2VTSDB : The extended M2VTS database</td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>names_and_faces</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by 
Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>voxceleb2</td><td>VoxCeleb2</td><td>VoxCeleb2: Deep Speaker Recognition</td><td>VoxCeleb2: Deep Speaker Recognition.</td><td><a href="https://pdfs.semanticscholar.org/8875/ae233bc074f5cd6c4ebba447b536a7e847a5.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=voxceleb2: deep speaker recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8875ae233bc074f5cd6c4ebba447b536a7e847a5</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB : Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr></table></body></html>
\ No newline at end of file
diff --git a/site/datasets/citations/brainwash.json b/site/datasets/citations/brainwash.json
index cff0b7b7..17db4acf 100644
--- a/site/datasets/citations/brainwash.json
+++ b/site/datasets/citations/brainwash.json
@@ -1 +1 @@
-{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}]}, "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), 
\u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": "Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", "type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue 
University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": 
["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": "Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. 
China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, 
\u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": "9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", "street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. 
(L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": "deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, 
Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, {"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. 
Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, {"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": []}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon 
Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}]}
\ No newline at end of file +{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}]}, "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), \u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": 
["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": "Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", "type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": 
"edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and 
Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": "Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. 
China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, 
\u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": "9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", "street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. 
(L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": "deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, 
Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, {"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. 
Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, {"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": []}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon 
Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "061356704ec86334dbbc073985375fe13cd39088", "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "addresses": [{"name": "University of Oxford", "source_name": "University of Oxford", "street_adddress": "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu", "country": "United Kingdom"}], "year": "2015", "pdf": ["https://arxiv.org/pdf/1409.1556.pdf"], "doi": []}, {"id": "14318685b5959b51d0f1e3db34643eb2855dc6d9", "title": "Going deeper with convolutions", "addresses": [{"name": "Google", "source_name": "Google, Inc.", "street_adddress": "1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA", "lat": "37.42199990", "lng": "-122.08405750", "type": "company", "country": "United States"}, {"name": "University of Michigan", "source_name": "University of Michigan", "street_adddress": "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu", "country": "United States"}, {"name": "University of North Carolina", "source_name": "University of North Carolina", "street_adddress": "University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA", "lat": "35.90503535", "lng": "-79.04775327", "type": "edu", "country": "United States"}], "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298594"]}, {"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "title": "End-to-End People Detection in Crowded Scenes", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": 
"2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file
diff --git a/site/datasets/final/brainwash.json b/site/datasets/final/brainwash.json
index c87bc3f4..21300e93 100644
--- a/site/datasets/final/brainwash.json
+++ b/site/datasets/final/brainwash.json
@@ -1 +1 @@
-{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "dataset": {"key": "brainwash", "name_short": "Brainwash", "name_display": "Brainwash Dataset", "name_full": "Brainwash Dataset", "purpose": "Head detection", "comment": "", "created_by": "Stanford University (US), Max Planck Institute for Informatics (DE)", "funded_by": "Max Planck Center for Visual Computing and Communication", "funded_by_short": "Max Planck Center for Visual Computing and Communication", "used_by": "", "license": "", "url": "https://purl.stanford.edu/sx925dc9385", "dl_im": "Y", "dl_meta": "", "dl_paper": "", "dl_web": "", "mp_pub": "Y", "ft_share": "Y", "nyt_share": "Y", "cooperative": "N", "indoor": "Y", "outdoor": "", "campus": "", "cyberspace": "", "parent": "", "source": "cctv_indoor", "usernames": "", "names": "", "flickr_meta": "", "year_start": "", "year_end": "", "year_published": "2015", "ongoing": "", "images": "11,917 ", "videos": "", "tracklets": "", "identities": "", "img_per_person": "", "num_cameras": "", "faces_or_persons": "91,146", "female": "", "male": "", "landmarks": "", "width": "640", "height": "480", "color": "Y", "gray": "", "tags": "fd", "size_gb": "4.1", "agreement": "", "agreement_signed": "", "flickr": "", "facebook": "", "youtube": "", "vimeo": "", "google": "", "bing": "", "adam": "", "berit": "", "charlie": "Y", "notes": "", "derivative_of": "", "": ""}, "paper": {"paper_id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "key": "brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "pdf": [], "address": {"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, "name": "Brainwash", "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "additional_papers": [], "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017",
"pdf": ["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), \u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": 
"Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", "type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", 
"lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": 
"46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": "Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of 
Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, \u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": "9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", 
"street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. (L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 
610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": "deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, {"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. 
Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, {"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": []}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon 
Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}]}
\ No newline at end of file +{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "dataset": {"key": "brainwash", "name_short": "Brainwash", "name_display": "Brainwash Dataset", "name_full": "Brainwash Dataset", "purpose": "Head detection", "comment": "", "created_by": "Stanford University (US), Max Planck Institute for Informatics (DE)", "funded_by": "Max Planck Center for Visual Computing and Communication", "funded_by_short": "Max Planck Center for Visual Computing and Communication", "used_by": "", "license": "", "url": "https://purl.stanford.edu/sx925dc9385", "dl_im": "Y", "dl_meta": "", "dl_paper": "", "dl_web": "", "mp_pub": "Y", "ft_share": "Y", "nyt_share": "Y", "cooperative": "N", "indoor": "Y", "outdoor": "", "campus": "", "cyberspace": "", "parent": "", "source": "cctv_indoor", "usernames": "", "names": "", "flickr_meta": "", "year_start": "", "year_end": "", "year_published": "2015", "ongoing": "", "images": "11,917 ", "videos": "", "tracklets": "", "identities": "", "img_per_person": "", "num_cameras": "", "faces_or_persons": "91,146", "female": "", "male": "", "landmarks": "", "width": "640", "height": "480", "color": "Y", "gray": "", "tags": "fd", "size_gb": "4.1", "agreement": "", "agreement_signed": "", "flickr": "", "facebook": "", "youtube": "", "vimeo": "", "google": "", "bing": "", "adam": "", "berit": "", "charlie": "Y", "notes": "", "derivative_of": "", "": ""}, "paper": {"paper_id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "key": "brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "pdf": [], "address": {"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, "name": "Brainwash", "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "additional_papers": [], "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for 
Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), \u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": "Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", 
"type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium 
Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": 
"Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, 
\u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, \u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": "9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", "street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": 
["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. (L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": 
"deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, {"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. 
Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, {"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": []}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon 
Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "061356704ec86334dbbc073985375fe13cd39088", "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "addresses": [{"name": "University of Oxford", "source_name": "University of Oxford", "street_adddress": "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu", "country": "United Kingdom"}], "year": "2015", "pdf": ["https://arxiv.org/pdf/1409.1556.pdf"], "doi": []}, {"id": "14318685b5959b51d0f1e3db34643eb2855dc6d9", "title": "Going deeper with convolutions", "addresses": [{"name": "Google", "source_name": "Google, Inc.", "street_adddress": "1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA", "lat": "37.42199990", "lng": "-122.08405750", "type": "company", "country": "United States"}, {"name": "University of Michigan", "source_name": "University of Michigan", "street_adddress": "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu", "country": "United States"}, {"name": "University of North Carolina", "source_name": "University of North Carolina", "street_adddress": "University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA", "lat": "35.90503535", "lng": "-79.04775327", "type": "edu", "country": "United States"}], "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298594"]}, {"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "title": "End-to-End People Detection in Crowded Scenes", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": 
"2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file
diff --git a/site/datasets/unknown/brainwash.json b/site/datasets/unknown/brainwash.json
index 1ecdb546..a584106d 100644
--- a/site/datasets/unknown/brainwash.json
+++ b/site/datasets/unknown/brainwash.json
@@ -1 +1 @@
-{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "citations": [{"id": "02caadae027f983261d93e40f4d9d1f785163db4", "title": "Multi-Task Deep Networks for Depth-Based 6D Object Pose and Joint Registration in Crowd Scenarios", "year": "2018", "pdf": ["https://arxiv.org/pdf/1806.03891.pdf"], "doi": []}, {"id": "6f172b6635ad9e3d3e0ab65d931dcb354eb9ff73", "title": "Accurate Single Stage Detector Using Recurrent Rolling Convolution", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099570"]}, {"id": "34786071f672b55fcdb24213a95f2ee52623ff23", "title": "MultiNet: Real-time Joint Semantic Reasoning for Autonomous Driving", "year": "2018", "pdf": ["https://arxiv.org/pdf/1612.07695.pdf"], "doi": []}, {"id": "439f6206480b3ce069d75a95b1ffed9417117a17", "title": "Representations, Analysis and Recognition of Shape and Motion from Imaging Data", "year": "2017", "pdf": [], "doi": ["https://doi.org/10.1007/978-3-030-19816-9"]}, {"id": "95addf732b584f7a2959f143d860863df3d1f320", "title": "Deep Learning on Attributed Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/95ad/df732b584f7a2959f143d860863df3d1f320.pdf"], "doi": []}, {"id": "5d49632f8c8cd06cd5ce66f007aa140f40c12c45", "title": "Bus-Crowdedness Estimation by Shallow Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8615907"]}, {"id": "b80a7bbde2986a0b3474258ec2fad0a75813d89f", "title": "Context Learning Network for Object Detection", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8637445"]}, {"id": "84afbf356669f544f6c7e19fdb273edc93bf93ee", "title": "Scatteract: Automated Extraction of Data from Scatter Plots", "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.06687.pdf"], "doi": []}, {"id": "bcdb697c9d748f7655859256b0228a51b19b6fee", "title": "Parallel RCNN: A deep learning method for people detection using RGB-D images", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302069"]}, {"id": "e586547a63400881c7a95d6ad6d5fa31ac237ca9", "title": "Variational Methods for Human Modeling", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/e586/547a63400881c7a95d6ad6d5fa31ac237ca9.pdf"], "doi": []}, {"id": "99c1bc2b09210d016a252ddae051ca7cf7fe0a56", "title": "Abnormality Extraction in Crowd", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/99c1/bc2b09210d016a252ddae051ca7cf7fe0a56.pdf"], "doi": []}, {"id": "d488dad9fa81817c85a284b09ebf198bf6b640f9", "title": "FCHD: A fast and accurate head detector", "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.08766.pdf"], "doi": []}, {"id": "b8c51b9ad3da310b590629b050152460abf7effb", "title": "Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks", "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.03193.pdf"], "doi": []}, {"id": "f38a1edab9f0f0f36718778ac8d510fd25c41269", "title": "Adversarial Adaptation From Synthesis to Reality in Fast Detector for Smoke Detection", "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8657935"]}, {"id": "6a0aaefce8a27a8727d896fa444ba27558b2d381", "title": "Relation Networks for Object Detection", "year": "2018", "pdf": [],
"doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8578476"]}, {"id": "82b1ca78b2fc7ae0459f5d7c61a78822b8a590d2", "title": "Deep Semantic Instance Segmentation of Tree-Like Structures Using Synthetic Data", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8658794"]}, {"id": "9043df1de4f6e181875011c1379d1a7f68a28d6c", "title": "People Detection from Overhead Cameras", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/9043/df1de4f6e181875011c1379d1a7f68a28d6c.pdf"], "doi": []}, {"id": "a4ffdc6f0811b5adbf41d20433a44fc546197b75", "title": "GraphVAE: Towards Generation of Small Graphs Using Variational Autoencoders", "year": "2018", "pdf": ["https://arxiv.org/pdf/1802.03480.pdf"], "doi": []}, {"id": "750bc0d2c9105a352001875127d796599a994886", "title": "Position Detection and Direction Prediction for Arbitrary-Oriented Ships via Multitask Rotation Region Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8464244"]}, {"id": "28cd46a078e8fad370b1aba34762a874374513a5", "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey", "year": "2017", "pdf": ["https://arxiv.org/pdf/1707.06436.pdf"], "doi": []}, {"id": "03a65d274dc6caea94f6ab344e0b4969575327e3", "title": "CrowdHuman: A Benchmark for Detecting Human in a Crowd", "year": "2018", "pdf": ["https://arxiv.org/pdf/1805.00123.pdf"], "doi": []}, {"id": "8935ffe454758e2e5def0b5190de6e28c350b3b8", "title": "Learning to Reconstruct Face Geometries Research", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/ea06/abd83c491877f0ce04cc7780ce068578f282.pdf"], "doi": []}, {"id": "993acefc2e350f9661125bb74df136e2b614ea23", "title": "People detection on the Pepper Robot using Convolutional Neural Networks and 3D Blob detection", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/993a/cefc2e350f9661125bb74df136e2b614ea23.pdf"], "doi": []}, {"id": "d78b190f98f9630cab261eabc399733af052f05c", "title": "Unsupervised Deep Domain Adaptation for Pedestrian Detection", "year": "2016", "pdf": ["https://arxiv.org/pdf/1802.03269.pdf"], "doi": []}, {"id": "08e18921d7e405ad27956c75f2613230170997d5", "title": "Towards Variational Generation of Small Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/a001/f2440fa49d1c137c9ca1b892857270096ef9.pdf"], "doi": []}]}
\ No newline at end of file
+{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "citations": [{"id": "02caadae027f983261d93e40f4d9d1f785163db4", "title": "Multi-Task Deep Networks for Depth-Based 6D Object Pose and Joint Registration in Crowd Scenarios", "year": "2018", "pdf": ["https://arxiv.org/pdf/1806.03891.pdf"], "doi": []}, {"id": "6f172b6635ad9e3d3e0ab65d931dcb354eb9ff73", "title": "Accurate Single Stage Detector Using Recurrent Rolling Convolution", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099570"]}, {"id": "34786071f672b55fcdb24213a95f2ee52623ff23", "title": "MultiNet: Real-time Joint Semantic Reasoning for Autonomous Driving", "year": "2018", "pdf": ["https://arxiv.org/pdf/1612.07695.pdf"], "doi": []}, {"id": "439f6206480b3ce069d75a95b1ffed9417117a17", "title": "Representations, Analysis and Recognition of Shape and Motion from Imaging Data", "year": "2017", "pdf": [], "doi": ["https://doi.org/10.1007/978-3-030-19816-9"]}, {"id": "95addf732b584f7a2959f143d860863df3d1f320", "title": "Deep Learning on Attributed Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/95ad/df732b584f7a2959f143d860863df3d1f320.pdf"], "doi": []}, {"id": "5d49632f8c8cd06cd5ce66f007aa140f40c12c45", "title": "Bus-Crowdedness Estimation by Shallow Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8615907"]}, {"id": "b80a7bbde2986a0b3474258ec2fad0a75813d89f", "title": "Context Learning Network for Object Detection", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8637445"]}, {"id": "84afbf356669f544f6c7e19fdb273edc93bf93ee", "title": "Scatteract: Automated Extraction of Data from Scatter Plots", "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.06687.pdf"], "doi": []}, {"id": "bcdb697c9d748f7655859256b0228a51b19b6fee", "title": "Parallel RCNN: A deep learning method for people detection using RGB-D images", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302069"]}, {"id": "e586547a63400881c7a95d6ad6d5fa31ac237ca9", "title": "Variational Methods for Human Modeling", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/e586/547a63400881c7a95d6ad6d5fa31ac237ca9.pdf"], "doi": []}, {"id": "99c1bc2b09210d016a252ddae051ca7cf7fe0a56", "title": "Abnormality Extraction in Crowd", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/99c1/bc2b09210d016a252ddae051ca7cf7fe0a56.pdf"], "doi": []}, {"id": "d488dad9fa81817c85a284b09ebf198bf6b640f9", "title": "FCHD: A fast and accurate head detector", "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.08766.pdf"], "doi": []}, {"id": "b8c51b9ad3da310b590629b050152460abf7effb", "title": "Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks", "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.03193.pdf"], "doi": []}, {"id": "f38a1edab9f0f0f36718778ac8d510fd25c41269", "title": "Adversarial Adaptation From Synthesis to Reality in Fast Detector for Smoke Detection", "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8657935"]}, {"id": "6a0aaefce8a27a8727d896fa444ba27558b2d381", "title": "Relation Networks for Object Detection", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8578476"]}, {"id": "82b1ca78b2fc7ae0459f5d7c61a78822b8a590d2", "title": "Deep Semantic Instance Segmentation of Tree-Like Structures Using Synthetic Data",
"year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8658794"]}, {"id": "9043df1de4f6e181875011c1379d1a7f68a28d6c", "title": "People Detection from Overhead Cameras", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/9043/df1de4f6e181875011c1379d1a7f68a28d6c.pdf"], "doi": []}, {"id": "a4ffdc6f0811b5adbf41d20433a44fc546197b75", "title": "GraphVAE: Towards Generation of Small Graphs Using Variational Autoencoders", "year": "2018", "pdf": ["https://arxiv.org/pdf/1802.03480.pdf"], "doi": []}, {"id": "750bc0d2c9105a352001875127d796599a994886", "title": "Position Detection and Direction Prediction for Arbitrary-Oriented Ships via Multitask Rotation Region Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8464244"]}, {"id": "28cd46a078e8fad370b1aba34762a874374513a5", "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey", "year": "2017", "pdf": ["https://arxiv.org/pdf/1707.06436.pdf"], "doi": []}, {"id": "03a65d274dc6caea94f6ab344e0b4969575327e3", "title": "CrowdHuman: A Benchmark for Detecting Human in a Crowd", "year": "2018", "pdf": ["https://arxiv.org/pdf/1805.00123.pdf"], "doi": []}, {"id": "8935ffe454758e2e5def0b5190de6e28c350b3b8", "title": "Learning to Reconstruct Face Geometries Research", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/ea06/abd83c491877f0ce04cc7780ce068578f282.pdf"], "doi": []}, {"id": "993acefc2e350f9661125bb74df136e2b614ea23", "title": "People detection on the Pepper Robot using Convolutional Neural Networks and 3D Blob detection", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/993a/cefc2e350f9661125bb74df136e2b614ea23.pdf"], "doi": []}, {"id": "d78b190f98f9630cab261eabc399733af052f05c", "title": "Unsupervised Deep Domain Adaptation for Pedestrian Detection", "year": "2016", "pdf": ["https://arxiv.org/pdf/1802.03269.pdf"], "doi": []}, {"id": "08e18921d7e405ad27956c75f2613230170997d5", "title": "Towards Variational Generation of Small Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/a001/f2440fa49d1c137c9ca1b892857270096ef9.pdf"], "doi": []}, {"id": "f6f4d887fb62d33a9a18cbb7bc58bd6247384a35", "title": "People detection in crowded scenes using hierarchical features", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8261462"]}, {"id": "0ceda9dae8b9f322df65ca2ef02caca9758aec6f", "title": "Context-Aware CNNs for Person Head Detection", "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410688"]}, {"id": "9f25b9efd09cc08c1ae14f301c90b903614968d8", "title": "Deep People Detection: A Comparative Study of SSD and LSTM-decoder", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575768"]}]}
\ No newline at end of file
diff --git a/site/datasets/verified/brainwash.json b/site/datasets/verified/brainwash.json
index e60a094c..e7dd9ff5 100644
--- a/site/datasets/verified/brainwash.json
+++ b/site/datasets/verified/brainwash.json
@@ -1 +1 @@
-{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "vetting": {"yes": 12, "no": 42, "total": 54}}, "citations": [{"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",
"lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}]}
\ No newline at end of file
+{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "vetting": {"yes": 12, "no": 42, "total": 54}}, "citations": [{"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title":
"Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file
