From c6413f0ddba312ec7efbc41359f844eba55095ff Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Tue, 2 Apr 2019 20:41:08 +0200 Subject: modal... --- client/modalImage/modal.css | 4 ++-- client/modalImage/modalImage.container.js | 12 ++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'client') diff --git a/client/modalImage/modal.css b/client/modalImage/modal.css index d9180125..fb3c7ab5 100644 --- a/client/modalImage/modal.css +++ b/client/modalImage/modal.css @@ -25,8 +25,8 @@ align-items: center; } .modal img { - max-width: 80vw; - max-height: 80vh; + max-width: 90vw; + max-height: 90vh; } .modal .caption { display: block; diff --git a/client/modalImage/modalImage.container.js b/client/modalImage/modalImage.container.js index a637deb6..55904c40 100644 --- a/client/modalImage/modalImage.container.js +++ b/client/modalImage/modalImage.container.js @@ -10,7 +10,7 @@ import csv from 'parse-csv' class ModalImage extends Component { state = { - visible: true, + visible: false, images: [], index: 0, } @@ -26,8 +26,11 @@ class ModalImage extends Component { if (document.activeElement && document.activeElement !== document.body) { return null } - // console.log(e.keyCode) + console.log(e.keyCode) switch (e.keyCode) { + case 27: // esc + this.close() + break case 37: // left this.prev() break @@ -41,16 +44,21 @@ class ModalImage extends Component { } loadImage(index) { + const { index, images } = this.state + if (!images.length) return + if (index < 0 || index >= this.images.length) return this.setState({ visible: true, index }) } prev() { const { index, images } = this.state + if (!images.length) return this.setState({ index: (images.length + index - 1) % images.length }) } next() { const { index, images } = this.state + if (!images.length) return this.setState({ index: (index + 1) % images.length }) } -- cgit v1.2.3-70-g09d2 From c0f42b7ec12c1731accfef5835d2c4df6a304791 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Tue, 2 Apr 2019 20:44:05 +0200 Subject: modal hand --- client/modalImage/modalImage.container.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'client') diff --git a/client/modalImage/modalImage.container.js b/client/modalImage/modalImage.container.js index 55904c40..20547de3 100644 --- a/client/modalImage/modalImage.container.js +++ b/client/modalImage/modalImage.container.js @@ -44,7 +44,7 @@ class ModalImage extends Component { } loadImage(index) { - const { index, images } = this.state + const { images } = this.state if (!images.length) return if (index < 0 || index >= this.images.length) return this.setState({ visible: true, index }) -- cgit v1.2.3-70-g09d2 From e52b66449c8b56abab70db03468f2f9ae7f24ec8 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Tue, 2 Apr 2019 21:34:00 +0200 Subject: modal hand --- client/modalImage/modalImage.container.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'client') diff --git a/client/modalImage/modalImage.container.js b/client/modalImage/modalImage.container.js index 20547de3..7ff03a3b 100644 --- a/client/modalImage/modalImage.container.js +++ b/client/modalImage/modalImage.container.js @@ -46,7 +46,7 @@ class ModalImage extends Component { loadImage(index) { const { images } = this.state if (!images.length) return - if (index < 0 || index >= this.images.length) return + if (index < 0 || index >= images.length) return this.setState({ visible: true, index }) } -- cgit v1.2.3-70-g09d2 From dffdbe1f0f1466229adb78ecd8b3bce3b2aa7445 Mon Sep 17 00:00:00 2001 From: Jules Laplace 
Date: Tue, 2 Apr 2019 21:46:04 +0200 Subject: modal hand --- client/modalImage/modal.css | 21 +++++++++++++++++++++ client/modalImage/modalImage.container.js | 6 +++--- 2 files changed, 24 insertions(+), 3 deletions(-) (limited to 'client') diff --git a/client/modalImage/modal.css b/client/modalImage/modal.css index fb3c7ab5..9589b8f4 100644 --- a/client/modalImage/modal.css +++ b/client/modalImage/modal.css @@ -32,6 +32,27 @@ display: block; text-align: center; } +.modal .prev span, +.modal .next span, +.modal .close span { + background: #222; + border-radius: 50%; + width: 40px; + height: 40px; + text-align: center; + display: flex; + justify-content: center; + align-items: center; + box-shadow: 0 1px 2px rgba(255,255,255,0.4); + transition: all 0.2s cubic-bezier(0,0,1,1); + user-select: none; +} +.desktop .modal .prev:hover span, +.desktop .modal .next:hover span, +.desktop .modal .close:hover span { + background: #000; + box-shadow: 0 1px 2px rgba(255,255,255,0.6); +} .modal .prev { position: absolute; top: 0; left: 0; diff --git a/client/modalImage/modalImage.container.js b/client/modalImage/modalImage.container.js index 7ff03a3b..5479ca5f 100644 --- a/client/modalImage/modalImage.container.js +++ b/client/modalImage/modalImage.container.js @@ -83,9 +83,9 @@ class ModalImage extends Component { {caption && <div className='caption'>
           {caption}
         </div>}
-        <div onClick={() => this.prev()}className='prev'>{'<'}</div>
-        <div onClick={() => this.next()} className='next'>{'>'}</div>
-        <div onClick={() => this.close()} className='close'>{'x'}</div>
+        <div onClick={() => this.prev()}className='prev'><span>{'<'}</span></div>
+        <div onClick={() => this.next()} className='next'><span>{'>'}</span></div>
+        <div onClick={() => this.close()} className='close'><span>{'×'}</span></div>
) } -- cgit v1.2.3-70-g09d2 From 24e4f4af71f1e146f33688822ac3e4242339faa4 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Wed, 3 Apr 2019 15:18:04 +0200 Subject: data --- client/index.js | 1 - site/content/pages/datasets/market_1501/index.md | 4 ++++ site/public/datasets/market_1501/index.html | 15 +++++++++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) (limited to 'client') diff --git a/client/index.js b/client/index.js index 10ed8563..c003a8b3 100644 --- a/client/index.js +++ b/client/index.js @@ -24,7 +24,6 @@ function appendModalImage() { const el = document.createElement('div') document.body.appendChild(el) ReactDOM.render(, el) - console.log(el) } function fetchDataset(payload) { diff --git a/site/content/pages/datasets/market_1501/index.md b/site/content/pages/datasets/market_1501/index.md index 8d253c79..f11f170e 100644 --- a/site/content/pages/datasets/market_1501/index.md +++ b/site/content/pages/datasets/market_1501/index.md @@ -27,6 +27,10 @@ authors: Adam Harvey {% include 'map.html' %} +{% include 'chart.html' %} + +{% include 'piechart.html' %} + {% include 'supplementary_header.html' %} {% include 'citations.html' %} diff --git a/site/public/datasets/market_1501/index.html b/site/public/datasets/market_1501/index.html index 1ffd7e6c..3281a9ae 100644 --- a/site/public/datasets/market_1501/index.html +++ b/site/public/datasets/market_1501/index.html @@ -83,6 +83,21 @@

     -->
+    [15 lines of generated chart markup, tags stripped in extraction: a "Who used
+    Market 1501?" heading, the caption "This bar chart presents a ranking of the top
+    countries where dataset citations originated. Mouse over individual columns to
+    see yearly totals. These charts show at most the top 10 countries.", and the
+    container elements for the bar-chart and pie-chart applets.]
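Note: the applet containers added above are mounted client-side from client/index.js (also patched in this commit, where it calls ReactDOM.render into created elements). A minimal sketch of that mount pattern, assuming illustrative '.applet' and 'data-payload' hooks that may not match the real wiring:

    import React from 'react'
    import ReactDOM from 'react-dom'
    import Applet from './applet'

    // mount one Applet per placeholder element; payload.cmd selects the applet
    // type ('chart', 'dataset_list', ...) as switched on in client/applet.js
    document.querySelectorAll('.applet').forEach((el) => {
      const payload = JSON.parse(el.getAttribute('data-payload') || '{}')
      ReactDOM.render(<Applet payload={payload} />, el)
    })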
-- cgit v1.2.3-70-g09d2 From 06f36622561838e14c93fde0cebaad47bc867d73 Mon Sep 17 00:00:00 2001 From: adamhrv Date: Wed, 10 Apr 2019 23:06:19 +0200 Subject: add citation count --- client/chart/pie.charts.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'client') diff --git a/client/chart/pie.charts.js b/client/chart/pie.charts.js index 84e85c3a..c3b94b7b 100644 --- a/client/chart/pie.charts.js +++ b/client/chart/pie.charts.js @@ -87,7 +87,7 @@ class PieCharts extends Component { height: countryRows.length < 4 ? 316 : 336, }} /> - {paper.name}{' dataset citations by country'} + {citations.length + ' verified ' + paper.name + ' dataset citations by country'}
-          {paper.name}{' dataset citations by organization type'}
+          {citations.length + ' verified ' + paper.name + ' dataset citations by organization type'}
) -- cgit v1.2.3-70-g09d2 From 699d7a77b9d4120dfb75f271cb924b0e05a2fcaa Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Wed, 17 Apr 2019 20:02:42 +0200 Subject: basic mobile css --- client/map/index.js | 9 ++ megapixels/app/site/parser.py | 3 +- site/assets/css/css.css | 143 +++++++++++++++++++++---- site/content/pages/datasets/duke_mtmc/index.md | 23 ++-- site/includes/cite_our_work.html | 2 +- site/public/datasets/duke_mtmc/index.html | 53 ++++----- 6 files changed, 171 insertions(+), 62 deletions(-) (limited to 'client') diff --git a/client/map/index.js b/client/map/index.js index 475ba3c6..29fc2286 100644 --- a/client/map/index.js +++ b/client/map/index.js @@ -150,6 +150,15 @@ export default function append(el, payload) { el.removeChild(mapCover) } }) + mapCover.querySelector('div').addEventListener('touchstart', (e) => { + e.preventDefault() + }) + mapCover.querySelector('div').addEventListener('tap', () => { + map.scrollWheelZoom.enable() + if (mapCover.parentNode === el) { + el.removeChild(mapCover) + } + }) function stopPropagation(e) { e.stopPropagation() } diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index 1489d056..92d950f8 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -260,7 +260,8 @@ def format_footnotes(footnotes, s3_path): continue key, note = footnote.split(': ', 1) footnote_index_lookup[key] = index - footnote_list.append('{}_BACKLINKS'.format(key, key) + markdown(note)) + note_markup = markdown(note).replace('

<p>', '', 1).replace('</p>', '', 1)
+        footnote_list.append('{} <a name="{}"></a>{}_BACKLINKS'.format(index, key, key) + note_markup)
         index += 1
     footnote_txt = '<ul class="footnotes"><li>' + '</li><li>'.join(footnote_list) + '</li></ul>
' diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 492ec347..a5e36542 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -55,6 +55,7 @@ header .logo { margin-right: 8px; width: 20px; height: 20px; + flex: 0 0 20px; } header .site_name { font-family: 'Roboto', sans-serif; @@ -142,7 +143,7 @@ footer { color: #888; font-size: 9pt; line-height: 17px; - padding: 20px 0 20px; + padding: 20px 20px; font-family: "Roboto", sans-serif; } footer > div { @@ -161,7 +162,7 @@ footer a { transition: color 0.1s cubic-bezier(0,0,1,1); margin-right: 5px; } -footer a:hover { +.desktop footer a:hover { color: #ddd; } @@ -318,7 +319,7 @@ p.subp{ } .right-sidebar { float: right; - width: 240px; + width: 200px; margin: 0px 20px 20px 20px; padding-top: 12px; padding-left: 20px; @@ -359,10 +360,11 @@ p.subp{ text-decoration: none; border-bottom: 1px solid; } -.left-sidebar a, .right-sidebar a{ +.left-sidebar a, .right-sidebar a { border-bottom: 1px solid #666; } -.content .left-sidebar a:hover, .content .right-sidebar a:hover{ +.desktop .content .left-sidebar a:hover, +.desktop .content .right-sidebar a:hover { border-bottom: 1px solid #ccc; } @@ -389,7 +391,7 @@ code { background: rgba(255,255,255,0.1); } pre { - margin: 0 auto; + margin: 0 auto 20px auto; max-width: 720px; border: 1px solid #666; border-radius: 2px; @@ -455,8 +457,8 @@ blockquote { } /* Buttons */ -.citation-opts { +.citation-opts { } .citation-opts li{ display: inline-block; @@ -475,22 +477,24 @@ blockquote { background: #444; border:0px; } + /* footnotes */ + .footnotes hr { display: none; } -ul.footnotes{ - max-width:720px; - margin:0 auto; - font-size:12px; +ul.footnotes { + max-width: 720px; + margin: 0 auto; + font-size: 12px; } -ul.footnotes li{ - font-size:12px; - list-style-type: decimal; - margin-bottom:12px; +ul.footnotes li { + font-size: 12px; + list-style-type: none; + margin-bottom: 12px; } -ul.footnotes p{ - font-size:12px; +ul.footnotes p { + font-size: 12px; } .footnotes ol:before { content: 'Footnotes'; @@ -575,12 +579,12 @@ section.fullwidth .image { margin: 10px auto 10px auto; font-family: 'Roboto'; } -.caption a{ - color:#ccc; +.caption a { + color: #ccc; border: 0; } -.caption a:hover{ - color:#fff; +.desktop .caption a:hover { + color: #fff; border: 0; } @@ -751,7 +755,7 @@ section.fullwidth .image { .desktop .content .dataset-list a { border: none; } -.dataset-list a:hover{ +.desktop .dataset-list a:hover { border: none; } .dataset-list a:nth-child(3n+1) { background-color: rgba(255, 0, 0, 0.1); } @@ -1087,6 +1091,7 @@ a.footnote_shim { .desktop a.footnote:hover { /*background-color: #ff8;*/ color: #fff; + border: 0; } .backlinks { margin-right: 10px; @@ -1101,6 +1106,9 @@ a.footnote_shim { bottom: 5px; margin-right: 2px; } +.content .footnotes .backlinks a { + bottom: 3px; +} li p { margin: 0; padding: 0; display: inline; @@ -1113,7 +1121,7 @@ li p { margin: 10px auto; padding-bottom: 10px } -.download-btn{ +.download-btn { display: inline-block; font-size: 13px; color: #ddd; @@ -1122,4 +1130,93 @@ li p { padding: 8px 10px; border-radius: 5px; transition: all 0.1s; +} + +/* iphone/ipad css */ +@media all and (max-device-width: 1024px) { + /* header / footer */ + .slogan { + padding-left: 10px; + } + header .splash { + display: none; + } + header .links a { + margin-right: 10px; + } + + /* content */ + + .intro_section { + padding: 50px 0 20px 0; + } + .intro_section .hero_desc { + font-size: 28px; + line-height: 50px; + margin-bottom: 20px; + } + .intro_section .inner { + margin: 
0; + max-width: 100%; + padding: 20px; + } + .intro_section .hero_subdesc { + max-width: 100%; + } + section h1, section h2, section h3, section h4, section h5, section h6, section p { + max-width: 100%; + } + section { + width: 100%; + padding: 0 10px; + } + .meta { + margin-right: 0px; + margin-bottom: 10px; + } + .modal img { + max-width: 100%; + } + th { + overflow: hidden; + text-overflow: ellipsis; + } + .citationHeader { + + } + section.wide { + width: 100%; + } + .map, .map .applet { + height: 360px; + } +} + +/* iphone-specific */ +@media all and (max-device-width: 640px) { + .right-sidebar { + float: none; + width: 100%; + border: 0; + margin: 0; + padding: 0 2px; + border-bottom: 1px solid #333; + } + .map, .map .applet { + height: 360px; + } + .citationBrowser input.q { + max-width: 180px; + } + .columns { + flex-direction: column; + } + .columns .column { + margin: 0; + } + .columns-2 .column, + .columns-3 .column, + .columns-4 .column { + width: 100%; + } } \ No newline at end of file diff --git a/site/content/pages/datasets/duke_mtmc/index.md b/site/content/pages/datasets/duke_mtmc/index.md index dd4551d9..2140fed7 100644 --- a/site/content/pages/datasets/duke_mtmc/index.md +++ b/site/content/pages/datasets/duke_mtmc/index.md @@ -43,7 +43,7 @@ Despite [repeated](https://www.hrw.org/news/2017/11/19/china-police-big-data-sys | Beihang University | Orientation-Guided Similarity Learning for Person Re-identification | [ieee.org](https://ieeexplore.ieee.org/document/8545620) | 2018 | ✔ | | Beihang University | Online Inter-Camera Trajectory Association Exploiting Person Re-Identification and Camera Topology | [acm.org](https://dl.acm.org/citation.cfm?id=3240663) | 2018 | ✔ | -The reasons that companies in China use the Duke MTMC dataset for research are technically no different than the reasons it is used in the United States and Europe. In fact the original creators of the dataset published a follow up report in 2017 titled [Tracking Social Groups Within and Across Cameras](https://www.semanticscholar.org/paper/Tracking-Social-Groups-Within-and-Across-Cameras-Solera-Calderara/9e644b1e33dd9367be167eb9d832174004840400) with specific applications to "automated analysis of crowds and social gatherings for surveillance and security applications". Their work, as well as the creation of the original dataset in 2014 were both supported in part by the United States Army Research Laboratory. +The reasons that companies in China use the Duke MTMC dataset for research are technically no different than the reasons it is used in the United States and Europe. In fact, the original creators of the dataset published a follow up report in 2017 titled [Tracking Social Groups Within and Across Cameras](https://www.semanticscholar.org/paper/Tracking-Social-Groups-Within-and-Across-Cameras-Solera-Calderara/9e644b1e33dd9367be167eb9d832174004840400) with specific applications to "automated analysis of crowds and social gatherings for surveillance and security applications". Their work, as well as the creation of the original dataset in 2014 were both supported in part by the United States Army Research Laboratory. Citations from the United States and Europe show a similar trend to that in China, including publicly acknowledged and verified usage of the Duke MTMC dataset supported or carried out by the United States Department of Homeland Security, IARPA, IBM, Microsoft (who provides surveillance to ICE), and Vision Semantics (who works with the UK Ministry of Defence). 
One [paper](https://pdfs.semanticscholar.org/59f3/57015054bab43fb8cbfd3f3dbf17b1d1f881.pdf) is even jointly published by researchers affiliated with both the University College of London and the National University of Defense Technology in China. @@ -59,14 +59,14 @@ Citations from the United States and Europe show a similar trend to that in Chin By some metrics the dataset is considered a huge success. It is regarded as highly influential research and has contributed to hundreds, if not thousands, of projects to advance artificial intelligence for person tracking and monitoring. All the above citations, regardless of which country is using it, align perfectly with the original [intent](http://vision.cs.duke.edu/DukeMTMC/) of the Duke MTMC dataset: "to accelerate advances in multi-target multi-camera tracking". -The same logic applies for all the new extensions of the Duke MTMC dataset including [Duke MTMC Re-ID](https://github.com/layumi/DukeMTMC-reID_evaluation), [Duke MTMC Video Re-ID](https://github.com/Yu-Wu/DukeMTMC-VideoReID), Duke MTMC Groups, and [Duke MTMC Attribute](https://github.com/vana77/DukeMTMC-attribute). And it also applies to all the new specialized datasets that will be created from Duke MTMC, such as the low-resolution face recognition dataset called [QMUL-SurvFace](https://qmul-survface.github.io/), which was funded in part by [SeeQuestor](https://seequestor.com), a computer vision provider to law enforcement agencies including Scotland Yards and Queensland Police. From the perspective of academic researchers, security contractors, and defense agencies using these datasets to advance their organization's work, Duke MTMC provides significant value regardless of who else is using it so long as it accelerate advances their own interests in artificial intelligence. +The same logic applies for all the new extensions of the Duke MTMC dataset including [Duke MTMC Re-ID](https://github.com/layumi/DukeMTMC-reID_evaluation), [Duke MTMC Video Re-ID](https://github.com/Yu-Wu/DukeMTMC-VideoReID), Duke MTMC Groups, and [Duke MTMC Attribute](https://github.com/vana77/DukeMTMC-attribute). And it also applies to all the new specialized datasets that will be created from Duke MTMC, such as the low-resolution face recognition dataset called [QMUL-SurvFace](https://qmul-survface.github.io/), which was funded in part by [SeeQuestor](https://seequestor.com), a computer vision provider to law enforcement agencies including Scotland Yards and Queensland Police. From the perspective of academic researchers, security contractors, and defense agencies using these datasets to advance their organization's work, Duke MTMC provides significant value regardless of who else is using it, so long as it advances their own interests in artificial intelligence. ![caption: Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc](assets/duke_mtmc_saliencies.jpg) -But this perspective comes at significant cost to civil rights, human rights, and privacy. The creation and distribution of the Duke MTMC illustrates an egregious prioritization of surveillance technologies over individual rights, where the simple act of going to class could implicate your biometric data in a surveillance training dataset, perhaps even used by foreign defense agencies against your own ethics, against universal human rights, or against your own political interests. +But this perspective comes at significant cost to civil rights, human rights, and privacy. 
The creation and distribution of the Duke MTMC illustrates an egregious prioritization of surveillance technologies over individual rights, where the simple act of going to class could implicate your biometric data in a surveillance training dataset, perhaps even used by foreign defense agencies against your own ethics, against your own political interests, or against universal human rights. -For the approximately 2,000 students in Duke MTMC dataset there is unfortunately no escape. It would be impossible to remove oneself from all copies of the dataset downloaded around the world. Instead, over 2,000 students and visitors who happened to be walking to class on March 13, 2014 will forever remain in all downloaded copies of the Duke MTMC dataset and all its extensions, contributing to a global supply chain of data that powers governmental and commercial expansion of biometric surveillance technologies. +For the approximately 2,000 students in Duke MTMC dataset, there is unfortunately no escape. It would be impossible to remove oneself from all copies of the dataset downloaded around the world. Instead, over 2,000 students and visitors who happened to be walking to class on March 13, 2014 will forever remain in all downloaded copies of the Duke MTMC dataset and all its extensions, contributing to a global supply chain of data that powers governmental and commercial expansion of biometric surveillance technologies. ![caption: Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc](assets/duke_mtmc_cameras.jpg) @@ -80,7 +80,7 @@ For the approximately 2,000 students in Duke MTMC dataset there is unfortunately #### Video Timestamps -The video timestamps contain the likely, but not yet confirmed, date and times of capture. Because the video timestamps align with the start and stop [time sync data](http://vision.cs.duke.edu/DukeMTMC/details.html#time-sync) provided by the researchers, it at least aligns the relative time. The [rainy weather](https://www.wunderground.com/history/daily/KIGX/date/2014-3-19?req_city=Durham&req_state=NC&req_statename=North%20Carolina&reqdb.zip=27708&reqdb.magic=1&reqdb.wmo=99999) on that day also contribute towards the likelihood of March 14, 2014.. +The video timestamps contain the likely, but not yet confirmed, date and times of capture. Because the video timestamps align with the start and stop [time sync data](http://vision.cs.duke.edu/DukeMTMC/details.html#time-sync) provided by the researchers, it at least aligns the relative time. The [rainy weather](https://www.wunderground.com/history/daily/KIGX/date/2014-3-19?req_city=Durham&req_state=NC&req_statename=North%20Carolina&reqdb.zip=27708&reqdb.magic=1&reqdb.wmo=99999) on that day also contributes towards the likelihood of March 14, 2014. === columns 2 @@ -103,11 +103,13 @@ The video timestamps contain the likely, but not yet confirmed, date and times o === end columns -#### Notes +#### Errata -- The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812 +- The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812. -If you use any data from the Duke MTMC please follow their [license](http://vision.cs.duke.edu/DukeMTMC/#how-to-cite) and cite their work as: +#### Citing Duke MTMC + +If you use any data from the Duke MTMC, please follow their [license](http://vision.cs.duke.edu/DukeMTMC/#how-to-cite) and cite their work as:
 @inproceedings{ristani2016MTMC,
@@ -120,14 +122,13 @@ If you use any data from the Duke MTMC please follow their [license](http://visi
 
 {% include 'cite_our_work.html' %}
 
-
 #### ToDo
 
 - clean up citations, formatting
 
 ### Footnotes
 
-[^xinjiang_nyt]: Mozur, Paul. "One Month, 500,000 Face Scans: How China Is Using A.I. to Profile a Minority". https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html. April 14, 2019.
+[^duke_mtmc_orig]: "Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking". 2016. [SemanticScholar](https://www.semanticscholar.org/paper/Performance-Measures-and-a-Data-Set-for-Tracking-Ristani-Solera/27a2fad58dd8727e280f97036e0d2bc55ef5424c)
 [^sensetime_qz]: 
 [^sensenets_uyghurs]: 
-[^duke_mtmc_orig]: "Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking". 2016. [SemanticScholar](https://www.semanticscholar.org/paper/Performance-Measures-and-a-Data-Set-for-Tracking-Ristani-Solera/27a2fad58dd8727e280f97036e0d2bc55ef5424c)
\ No newline at end of file
+[^xinjiang_nyt]: Mozur, Paul. "One Month, 500,000 Face Scans: How China Is Using A.I. to Profile a Minority". https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html. April 14, 2019.
diff --git a/site/includes/cite_our_work.html b/site/includes/cite_our_work.html
index 810561e7..8625355e 100644
--- a/site/includes/cite_our_work.html
+++ b/site/includes/cite_our_work.html
@@ -11,7 +11,7 @@
   title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
   year = 2019,
   url = {https://megapixels.cc/},
-  urldate = {2019-04-20}
+  urldate = {2019-04-18}
 }

diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html index def7b8fa..86cccd11 100644 --- a/site/public/datasets/duke_mtmc/index.html +++ b/site/public/datasets/duke_mtmc/index.html @@ -46,9 +46,9 @@
Website
-

Duke MTMC (Multi-Target, Multi-Camera) is a dataset of surveillance video footage taken on Duke University's campus in 2014 and is used for research and development of video tracking systems, person re-identification, and low-resolution facial recognition. The dataset contains over 14 hours of synchronized surveillance video from 8 cameras at 1080p and 60FPS with over 2 million frames of 2,000 students walking to and from classes. The 8 surveillance cameras deployed on campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy" 4.

+

Duke MTMC (Multi-Target, Multi-Camera) is a dataset of surveillance video footage taken on Duke University's campus in 2014 and is used for research and development of video tracking systems, person re-identification, and low-resolution facial recognition. The dataset contains over 14 hours of synchronized surveillance video from 8 cameras at 1080p and 60FPS with over 2 million frames of 2,000 students walking to and from classes. The 8 surveillance cameras deployed on campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy" 1.

In this investigation into the Duke MTMC dataset we tracked down over 100 publicly available research papers that explicitly acknowledged using Duke MTMC. Our analysis shows that the dataset has spread far beyond its origins and intentions in academic research projects at Duke University. Since its publication in 2016, more than twice as many research citations originated in China as in the United States. Among these citations were papers with explicit and direct links to the Chinese military and several of the companies known to provide Chinese authorities with the oppressive surveillance technology used to monitor millions of Uighur Muslims.

-

In one 2018 paper jointly published by researchers from SenseNets and SenseTime (and funded by SenseTime Group Limited) entitled Attention-Aware Compositional Network for Person Re-identification, the Duke MTMC dataset was used for "extensive experiments" on improving person re-identification across multiple surveillance cameras with important applications in "finding missing elderly and children, and suspect tracking, etc." Both SenseNets and SenseTime have been directly linked to providing surveillance technology to monitor Uighur Muslims in China. 2 3 1

+

In one 2018 paper jointly published by researchers from SenseNets and SenseTime (and funded by SenseTime Group Limited) entitled Attention-Aware Compositional Network for Person Re-identification, the Duke MTMC dataset was used for "extensive experiments" on improving person re-identification across multiple surveillance cameras with important applications in "finding missing elderly and children, and suspect tracking, etc." Both SenseNets and SenseTime have been directly linked to providing surveillance technology to monitor Uighur Muslims in China. 2 3 4

 A collection of 1,600 out of the approximately 2,000 students and pedestrians in the Duke MTMC dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification, and eventually the QMUL SurvFace face recognition dataset. Open Data Commons Attribution License.

Despite repeated warnings by Human Rights Watch that the authoritarian surveillance used in China represents a violation of human rights, researchers at Duke University continued to provide open access to their dataset for anyone to use for any project. As the surveillance crisis in China grew, so did the number of citations with links to organizations complicit in the crisis. In 2018 alone there were over 70 research projects happening in China that publicly acknowledged benefiting from the Duke MTMC dataset. Amongst these were projects from SenseNets, SenseTime, CloudWalk, Megvii, Beihang University, and the PLA's National University of Defense Technology.

@@ -146,7 +146,7 @@
-

The reasons that companies in China use the Duke MTMC dataset for research are technically no different than the reasons it is used in the United States and Europe. In fact the original creators of the dataset published a follow up report in 2017 titled Tracking Social Groups Within and Across Cameras with specific applications to "automated analysis of crowds and social gatherings for surveillance and security applications". Their work, as well as the creation of the original dataset in 2014 were both supported in part by the United States Army Research Laboratory.

+

The reasons that companies in China use the Duke MTMC dataset for research are technically no different than the reasons it is used in the United States and Europe. In fact, the original creators of the dataset published a follow up report in 2017 titled Tracking Social Groups Within and Across Cameras with specific applications to "automated analysis of crowds and social gatherings for surveillance and security applications". Their work, as well as the creation of the original dataset in 2014 were both supported in part by the United States Army Research Laboratory.

Citations from the United States and Europe show a similar trend to that in China, including publicly acknowledged and verified usage of the Duke MTMC dataset supported or carried out by the United States Department of Homeland Security, IARPA, IBM, Microsoft (who provides surveillance to ICE), and Vision Semantics (who works with the UK Ministry of Defence). One paper is even jointly published by researchers affiliated with both the University College of London and the National University of Defense Technology in China.

@@ -203,9 +203,9 @@

By some metrics the dataset is considered a huge success. It is regarded as highly influential research and has contributed to hundreds, if not thousands, of projects to advance artificial intelligence for person tracking and monitoring. All the above citations, regardless of which country is using it, align perfectly with the original intent of the Duke MTMC dataset: "to accelerate advances in multi-target multi-camera tracking".

-

The same logic applies for all the new extensions of the Duke MTMC dataset including Duke MTMC Re-ID, Duke MTMC Video Re-ID, Duke MTMC Groups, and Duke MTMC Attribute. And it also applies to all the new specialized datasets that will be created from Duke MTMC, such as the low-resolution face recognition dataset called QMUL-SurvFace, which was funded in part by SeeQuestor, a computer vision provider to law enforcement agencies including Scotland Yards and Queensland Police. From the perspective of academic researchers, security contractors, and defense agencies using these datasets to advance their organization's work, Duke MTMC provides significant value regardless of who else is using it so long as it accelerate advances their own interests in artificial intelligence.

-
 Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc

But this perspective comes at significant cost to civil rights, human rights, and privacy. The creation and distribution of the Duke MTMC illustrates an egregious prioritization of surveillance technologies over individual rights, where the simple act of going to class could implicate your biometric data in a surveillance training dataset, perhaps even used by foreign defense agencies against your own ethics, against universal human rights, or against your own political interests.

-

For the approximately 2,000 students in Duke MTMC dataset there is unfortunately no escape. It would be impossible to remove oneself from all copies of the dataset downloaded around the world. Instead, over 2,000 students and visitors who happened to be walking to class on March 13, 2014 will forever remain in all downloaded copies of the Duke MTMC dataset and all its extensions, contributing to a global supply chain of data that powers governmental and commercial expansion of biometric surveillance technologies.

+

The same logic applies for all the new extensions of the Duke MTMC dataset including Duke MTMC Re-ID, Duke MTMC Video Re-ID, Duke MTMC Groups, and Duke MTMC Attribute. And it also applies to all the new specialized datasets that will be created from Duke MTMC, such as the low-resolution face recognition dataset called QMUL-SurvFace, which was funded in part by SeeQuestor, a computer vision provider to law enforcement agencies including Scotland Yards and Queensland Police. From the perspective of academic researchers, security contractors, and defense agencies using these datasets to advance their organization's work, Duke MTMC provides significant value regardless of who else is using it, so long as it advances their own interests in artificial intelligence.

+
 Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc

But this perspective comes at significant cost to civil rights, human rights, and privacy. The creation and distribution of the Duke MTMC illustrates an egregious prioritization of surveillance technologies over individual rights, where the simple act of going to class could implicate your biometric data in a surveillance training dataset, perhaps even used by foreign defense agencies against your own ethics, against your own political interests, or against universal human rights.

+

For the approximately 2,000 students in Duke MTMC dataset, there is unfortunately no escape. It would be impossible to remove oneself from all copies of the dataset downloaded around the world. Instead, over 2,000 students and visitors who happened to be walking to class on March 13, 2014 will forever remain in all downloaded copies of the Duke MTMC dataset and all its extensions, contributing to a global supply chain of data that powers governmental and commercial expansion of biometric surveillance technologies.

 Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
 Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.

Who used Duke MTMC Dataset?

@@ -267,7 +267,7 @@

Supplementary Information

Video Timestamps

-

The video timestamps contain the likely, but not yet confirmed, date and times of capture. Because the video timestamps align with the start and stop time sync data provided by the researchers, it at least aligns the relative time. The rainy weather on that day also contribute towards the likelihood of March 14, 2014..

+

The video timestamps contain the likely, but not yet confirmed, date and times of capture. Because the video timestamps align with the start and stop time sync data provided by the researchers, it at least aligns the relative time. The rainy weather on that day also contributes towards the likelihood of March 14, 2014.

@@ -338,11 +338,12 @@
Camera
-

Notes

+

Errata

    -
  • The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812
  • +
  • The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812.
-

If you use any data from the Duke MTMC please follow their license and cite their work as:

+

Citing Duke MTMC

+

If you use any data from the Duke MTMC, please follow their license and cite their work as:

 @inproceedings{ristani2016MTMC,
  title =        {Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking},
@@ -363,7 +364,7 @@
   title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
   year = 2019,
   url = {https://megapixels.cc/},
-  urldate = {2019-04-20}
+  urldate = {2019-04-18}
 }

@@ -371,25 +372,25 @@
  • clean up citations, formatting
-

References

References
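Note: the 'tap' listener added to client/map/index.js in this patch is not a native DOM event, so it will never fire without a gesture library; a touchend-based sketch that does the same work, reusing the mapCover, map, and el variables from that file:

    mapCover.querySelector('div').addEventListener('touchend', () => {
      // mirror the click path: enable scroll-wheel zoom and remove the cover
      map.scrollWheelZoom.enable()
      if (mapCover.parentNode === el) {
        el.removeChild(mapCover)
      }
    })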

-- cgit v1.2.3-70-g09d2 From 020c015cbca3224f023e44ee72ec11b65f9f80b1 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 18 Apr 2019 16:12:00 +0200 Subject: modal css --- client/actions.js | 12 +-- client/applet.js | 18 ++--- client/modalImage/modal.css | 67 +++++++++++----- client/modalImage/modalImage.container.js | 6 +- client/store.js | 12 +-- site/assets/css/css.css | 124 +++++------------------------- site/assets/css/mobile.css | 91 ++++++++++++++++++++++ site/assets/img/arrow-left-black.png | Bin 0 -> 3336 bytes site/assets/img/arrow-left.png | Bin 0 -> 2183 bytes site/assets/img/arrow-right-black.png | Bin 0 -> 3380 bytes site/assets/img/arrow-right.png | Bin 0 -> 2183 bytes site/assets/img/close-black.png | Bin 0 -> 1511 bytes site/assets/img/close.png | Bin 0 -> 3738 bytes site/content/pages/datasets/uccs/index.md | 2 +- 14 files changed, 183 insertions(+), 149 deletions(-) create mode 100644 site/assets/css/mobile.css create mode 100644 site/assets/img/arrow-left-black.png create mode 100644 site/assets/img/arrow-left.png create mode 100644 site/assets/img/arrow-right-black.png create mode 100644 site/assets/img/arrow-right.png create mode 100644 site/assets/img/close-black.png create mode 100644 site/assets/img/close.png (limited to 'client') diff --git a/client/actions.js b/client/actions.js index 7007eb76..4ed6660d 100644 --- a/client/actions.js +++ b/client/actions.js @@ -1,9 +1,9 @@ -import * as faceAnalysis from './faceAnalysis/faceAnalysis.actions' -import * as faceSearch from './faceSearch/faceSearch.actions' -import * as nameSearch from './nameSearch/nameSearch.actions' +// import * as faceAnalysis from './faceAnalysis/faceAnalysis.actions' +// import * as faceSearch from './faceSearch/faceSearch.actions' +// import * as nameSearch from './nameSearch/nameSearch.actions' export { - faceAnalysis, - faceSearch, - nameSearch, + // faceAnalysis, + // faceSearch, + // nameSearch, } diff --git a/client/applet.js b/client/applet.js index db95168a..ebc0e3fc 100644 --- a/client/applet.js +++ b/client/applet.js @@ -1,8 +1,8 @@ import React, { Component } from 'react' -import { Container as FaceSearchContainer } from './faceSearch' -import { Container as FaceAnalysisContainer } from './faceAnalysis' -import { Container as NameSearchContainer } from './nameSearch' +// import { Container as FaceSearchContainer } from './faceSearch' +// import { Container as FaceAnalysisContainer } from './faceAnalysis' +// import { Container as NameSearchContainer } from './nameSearch' import { Container as DatasetListContainer } from './datasetList' import { CitationsTable, FileTable } from './table' import { CountriesByYear, PieCharts } from './chart' @@ -11,12 +11,12 @@ export default class Applet extends Component { render() { // console.log(this.props.payload.cmd) switch (this.props.payload.cmd) { - case 'face_analysis': - return - case 'face_search': - return - case 'name_search': - return + // case 'face_analysis': + // return + // case 'face_search': + // return + // case 'name_search': + // return case 'dataset_list': return case 'chart': diff --git a/client/modalImage/modal.css b/client/modalImage/modal.css index 9589b8f4..3ac9237e 100644 --- a/client/modalImage/modal.css +++ b/client/modalImage/modal.css @@ -1,7 +1,7 @@ .modal { position: fixed; top: 0; left: 0; width: 100%; height: 100%; - background: rgba(0,0,0,0.8); + background: rgba(0,0,0,0.9); color: white; display: flex; justify-content: center; @@ -25,12 +25,14 @@ align-items: center; } .modal img { - max-width: 90vw; + 
max-width: 85vw; max-height: 90vh; } .modal .caption { display: block; text-align: center; + background: black; + padding: 10px; } .modal .prev span, .modal .next span, @@ -53,38 +55,63 @@ background: #000; box-shadow: 0 1px 2px rgba(255,255,255,0.6); } -.modal .prev { +.modal .prev, +.modal .next, +.modal .close { position: absolute; - top: 0; left: 0; - width: 10%; + top: 0; + padding: 20px; + width: 8%; height: 100%; display: flex; justify-content: center; align-items: center; - color: white; - font-size: 40px; cursor: pointer; + transition: all 0.1s cubic-bezier(0,0,0,1); +} +.modal .prev { + left: 0; } .modal .next { - position: absolute; - top: 0; right: 0; - width: 10%; - height: 100%; - display: flex; - justify-content: center; - align-items: center; - color: white; - font-size: 40px; - cursor: pointer; + right: 0; } +.modal .prev img, +.modal .next img { + max-width: 100%; + max-height: 100%; +} + .modal .close { position: absolute; top: 0; right: 0; - width: 10vw; height: 10vw; + width: 80px; + height: 80px; + max-width: 10vw; + max-height: 10vw; display: flex; justify-content: center; align-items: center; - color: white; - font-size: 40px; cursor: pointer; + transition: all 0.1s cubic-bezier(0,0,0,1); + padding: 20px; +} +.modal .close img { + width: 100%; + height: 100%; +} + +.desktop .modal .prev:hover { + width: 9%; + left: -8px; +} +.desktop .modal .next:hover { + width: 9%; + right: -8px; +} +.desktop .modal .close:hover { + padding: 10px; +} + +@media all and (max-device-width: 1024px) { + } \ No newline at end of file diff --git a/client/modalImage/modalImage.container.js b/client/modalImage/modalImage.container.js index 5479ca5f..d6271d61 100644 --- a/client/modalImage/modalImage.container.js +++ b/client/modalImage/modalImage.container.js @@ -83,9 +83,9 @@ class ModalImage extends Component { {caption &&
           {caption}
         </div>}
-        <div onClick={() => this.prev()}className='prev'><span>{'<'}</span></div>
-        <div onClick={() => this.next()} className='next'><span>{'>'}</span></div>
-        <div onClick={() => this.close()} className='close'><span>{'×'}</span></div>
+        <div onClick={() => this.prev()} className='prev' aria-label='Previous image' alt='Previous image'><img src='/assets/img/arrow-left.png' /></div>
+        <div onClick={() => this.next()} className='next' aria-label='Next image' alt='Next image'><img src='/assets/img/arrow-right.png' /></div>
+        <div onClick={() => this.close()} className='close' aria-label='Close' alt='Close'><img src='/assets/img/close.png' /></div>
) } diff --git a/client/store.js b/client/store.js index e896bc58..a404a19d 100644 --- a/client/store.js +++ b/client/store.js @@ -1,14 +1,14 @@ import { applyMiddleware, compose, combineReducers, createStore } from 'redux' import thunk from 'redux-thunk' -import faceAnalysisReducer from './faceAnalysis/faceAnalysis.reducer' -import faceSearchReducer from './faceSearch/faceSearch.reducer' -import nameSearchReducer from './nameSearch/nameSearch.reducer' +// import faceAnalysisReducer from './faceAnalysis/faceAnalysis.reducer' +// import faceSearchReducer from './faceSearch/faceSearch.reducer' +// import nameSearchReducer from './nameSearch/nameSearch.reducer' const rootReducer = combineReducers({ - faceAnalysis: faceAnalysisReducer, - faceSearch: faceSearchReducer, - nameSearch: nameSearchReducer, + // faceAnalysis: faceAnalysisReducer, + // faceSearch: faceSearchReducer, + // nameSearch: nameSearchReducer, }) function configureStore(initialState = {}) { diff --git a/site/assets/css/css.css b/site/assets/css/css.css index a5e36542..48816fef 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -1,4 +1,10 @@ * { box-sizing: border-box; outline: 0; } +@media (prefers-reduced-motion: reduce) { + * { + animation-duration: 0.001s !important; + transition-duration: 0.001s !important; + } +} html, body { margin: 0; padding: 0; @@ -227,7 +233,7 @@ h5 { text-transform: uppercase; letter-spacing: 2px; } -.right-sidebar ul li a{ +.right-sidebar ul li a { border-bottom: 0; } th, .gray { @@ -796,30 +802,29 @@ section.fullwidth .image { padding: 0; } .dataset-list .sort-options:before { - content: 'Browse list by '; + content: 'Browse list by:'; opacity: 0.7; - margin-right: 0px; + display: block; + padding-bottom: 10px; } .dataset-list .sort-options li { display: inline-block; margin: 0; - padding: 0; cursor: pointer; margin-right: 20px; font-size: 16px; -} -.dataset-list .sort-options li:before { - content: '_'; - opacity: 0; - margin-right: 10px; + transition: background 0.2s; + background: #888; + color: #fff; + padding: 4px 6px; + border-radius: 4px; + font-weight: 500; + font-size: 14px; + cursor: pointer; } .dataset-list .sort-options li.active { - border-bottom: 1px solid; -} -.dataset-list .sort-options li.active:before { - content: '>'; - opacity: 1; - margin-right: 10px; + background: #fff; + color: #222; } .dataset_list .applet { margin-bottom: 15px; @@ -1131,92 +1136,3 @@ li p { border-radius: 5px; transition: all 0.1s; } - -/* iphone/ipad css */ -@media all and (max-device-width: 1024px) { - /* header / footer */ - .slogan { - padding-left: 10px; - } - header .splash { - display: none; - } - header .links a { - margin-right: 10px; - } - - /* content */ - - .intro_section { - padding: 50px 0 20px 0; - } - .intro_section .hero_desc { - font-size: 28px; - line-height: 50px; - margin-bottom: 20px; - } - .intro_section .inner { - margin: 0; - max-width: 100%; - padding: 20px; - } - .intro_section .hero_subdesc { - max-width: 100%; - } - section h1, section h2, section h3, section h4, section h5, section h6, section p { - max-width: 100%; - } - section { - width: 100%; - padding: 0 10px; - } - .meta { - margin-right: 0px; - margin-bottom: 10px; - } - .modal img { - max-width: 100%; - } - th { - overflow: hidden; - text-overflow: ellipsis; - } - .citationHeader { - - } - section.wide { - width: 100%; - } - .map, .map .applet { - height: 360px; - } -} - -/* iphone-specific */ -@media all and (max-device-width: 640px) { - .right-sidebar { - float: none; - width: 100%; - border: 0; - 
margin: 0; - padding: 0 2px; - border-bottom: 1px solid #333; - } - .map, .map .applet { - height: 360px; - } - .citationBrowser input.q { - max-width: 180px; - } - .columns { - flex-direction: column; - } - .columns .column { - margin: 0; - } - .columns-2 .column, - .columns-3 .column, - .columns-4 .column { - width: 100%; - } -} \ No newline at end of file diff --git a/site/assets/css/mobile.css b/site/assets/css/mobile.css new file mode 100644 index 00000000..6a742277 --- /dev/null +++ b/site/assets/css/mobile.css @@ -0,0 +1,91 @@ +/* MOBILE - iphone/ipad css */ + +@media all and (max-device-width: 1024px) { + /* header / footer */ + .slogan { + padding-left: 10px; + } + header .splash { + display: none; + } + header .links a { + margin-right: 14px; + font-size: 14px; + } + + /* MOBILE content */ + + .intro_section { + padding: 50px 0 20px 0; + } + .intro_section .hero_desc { + font-size: 28px; + line-height: 50px; + margin-bottom: 20px; + } + .intro_section .inner { + margin: 0; + max-width: 100%; + padding: 20px; + } + .intro_section .hero_subdesc { + max-width: 100%; + } + section h1, section h2, section h3, section h4, section h5, section h6, section p { + max-width: 100%; + } + section { + width: 100%; + padding: 0 10px; + } + .meta { + margin-right: 0px; + margin-bottom: 10px; + } + .modal img { + max-width: 100%; + } + th { + overflow: hidden; + text-overflow: ellipsis; + } + .citationHeader { + + } + section.wide { + width: 100%; + } + .map, .map .applet { + height: 360px; + } + + /* MOBILE datasets page */ +} + +/* iphone-specific */ +@media all and (max-device-width: 640px) { + .right-sidebar { + float: none; + width: 100%; + border: 0; + margin: 0 0 14px 0; + padding: 0 2px; + } + .map, .map .applet { + height: 360px; + } + .citationBrowser input.q { + max-width: 180px; + } + .columns { + flex-direction: column; + } + .columns .column { + margin: 0; + } + .columns-2 .column, + .columns-3 .column, + .columns-4 .column { + width: 100%; + } +} \ No newline at end of file diff --git a/site/assets/img/arrow-left-black.png b/site/assets/img/arrow-left-black.png new file mode 100644 index 00000000..377a696f Binary files /dev/null and b/site/assets/img/arrow-left-black.png differ diff --git a/site/assets/img/arrow-left.png b/site/assets/img/arrow-left.png new file mode 100644 index 00000000..3ddd4781 Binary files /dev/null and b/site/assets/img/arrow-left.png differ diff --git a/site/assets/img/arrow-right-black.png b/site/assets/img/arrow-right-black.png new file mode 100644 index 00000000..12dbe37c Binary files /dev/null and b/site/assets/img/arrow-right-black.png differ diff --git a/site/assets/img/arrow-right.png b/site/assets/img/arrow-right.png new file mode 100644 index 00000000..0cb2588f Binary files /dev/null and b/site/assets/img/arrow-right.png differ diff --git a/site/assets/img/close-black.png b/site/assets/img/close-black.png new file mode 100644 index 00000000..191442ac Binary files /dev/null and b/site/assets/img/close-black.png differ diff --git a/site/assets/img/close.png b/site/assets/img/close.png new file mode 100644 index 00000000..529d7302 Binary files /dev/null and b/site/assets/img/close.png differ diff --git a/site/content/pages/datasets/uccs/index.md b/site/content/pages/datasets/uccs/index.md index b6073384..de2cec4d 100644 --- a/site/content/pages/datasets/uccs/index.md +++ b/site/content/pages/datasets/uccs/index.md @@ -28,7 +28,7 @@ The UCCS dataset includes over 1,700 unique identities, most of which are studen ![caption: The location at University of 
Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. Image: Google Maps](assets/uccs_map_aerial.jpg) -The long-range surveillance images in the UnContsrained College Students dataset were taken using a Canon 7D 18-megapixel digital camera fitted with a Sigma 800mm F5.6 EX APO DG HSM telephoto lens and pointed out an office window across the university's West Lawn. The students were photographed from a distance of approximately 150 meters through an office window. "The camera [was] programmed to start capturing images at specific time intervals between classes to maximize the number of faces being captured."[^sapkota_boult] +The long-range surveillance images in the UnConstrained College Students dataset were taken using a Canon 7D 18-megapixel digital camera fitted with a Sigma 800mm F5.6 EX APO DG HSM telephoto lens and pointed out an office window across the university's West Lawn. The students were photographed from a distance of approximately 150 meters through an office window. "The camera [was] programmed to start capturing images at specific time intervals between classes to maximize the number of faces being captured."[^sapkota_boult] Their setup made it impossible for students to know they were being photographed, providing the researchers with realistic surveillance images to help build face recognition systems for real world applications for defense, intelligence, and commercial partners. ![caption: Example images from the UnConstrained College Students Dataset. ](assets/uccs_grid.jpg) -- cgit v1.2.3-70-g09d2
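Note: the modal's keydown handler (first patch in this series) switches on e.keyCode, which is deprecated; an equivalent sketch of the same handler using KeyboardEvent.key:

    onKeyDown = (e) => {
      // ignore keystrokes aimed at form fields, as the original handler does
      if (document.activeElement && document.activeElement !== document.body) return
      switch (e.key) {
        case 'Escape': this.close(); break
        case 'ArrowLeft': this.prev(); break
        case 'ArrowRight': this.next(); break
      }
    }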