-rw-r--r--  client/applet.js  7
-rw-r--r--  client/index.js  4
-rw-r--r--  client/table/citations.table.js  66
-rw-r--r--  client/table/file.table.js  62
-rw-r--r--  client/table/index.js  11
-rw-r--r--  client/table/tabulator.css  27
-rw-r--r--  client/tables.js  96
-rw-r--r--  package-lock.json  90
-rw-r--r--  package.json  1
-rw-r--r--  site/includes/citations.html  4
-rw-r--r--  site/public/datasets/50_people_one_question/index.html  4
-rw-r--r--  site/public/datasets/brainwash/index.html  4
-rw-r--r--  site/public/datasets/celeba/index.html  4
-rw-r--r--  site/public/datasets/cofw/index.html  4
-rw-r--r--  site/public/datasets/duke_mtmc/index.html  4
-rw-r--r--  site/public/datasets/hrt_transgender/index.html  4
-rw-r--r--  site/public/datasets/index.html  12
-rw-r--r--  site/public/datasets/lfw/index.html  4
-rw-r--r--  site/public/datasets/market_1501/index.html  4
-rw-r--r--  site/public/datasets/msceleb/index.html  143
-rw-r--r--  site/public/datasets/pipa/index.html  4
-rw-r--r--  site/public/datasets/uccs/index.html  4
-rw-r--r--  site/public/datasets/viper/index.html  4
23 files changed, 440 insertions, 127 deletions
diff --git a/client/applet.js b/client/applet.js
index 21e1e4fa..db95168a 100644
--- a/client/applet.js
+++ b/client/applet.js
@@ -4,11 +4,12 @@ import { Container as FaceSearchContainer } from './faceSearch'
import { Container as FaceAnalysisContainer } from './faceAnalysis'
import { Container as NameSearchContainer } from './nameSearch'
import { Container as DatasetListContainer } from './datasetList'
+import { CitationsTable, FileTable } from './table'
import { CountriesByYear, PieCharts } from './chart'
export default class Applet extends Component {
render() {
- // console.log(this.props)
+ // console.log(this.props.payload.cmd)
switch (this.props.payload.cmd) {
case 'face_analysis':
return <FaceAnalysisContainer {...this.props} />
@@ -22,6 +23,10 @@ export default class Applet extends Component {
return <CountriesByYear {...this.props} />
case 'piechart':
return <PieCharts {...this.props} />
+ case 'citations':
+ return <CitationsTable {...this.props} />
+ case 'load_file':
+ return <FileTable {...this.props} />
default:
return <pre style={{ color: '#0f0' }}>{'Megapixels'}</pre>
}
diff --git a/client/index.js b/client/index.js
index 5a7315b5..668aebfb 100644
--- a/client/index.js
+++ b/client/index.js
@@ -6,7 +6,6 @@ import { Provider } from 'react-redux'
import { toArray } from './util'
import Applet from './applet'
import { store } from './store'
-import appendTable from './tables'
import appendMap from './map'
function appendReactApplet(el, payload) {
@@ -33,7 +32,8 @@ function appendApplets(applets) {
case 'citations':
case 'load_file':
el.parentNode.classList.add('wide')
- appendTable(el, payload)
+ appendReactApplet(el, payload)
+ el.classList.add('loaded')
break
case 'map':
el.parentNode.classList.add('wide')
diff --git a/client/table/citations.table.js b/client/table/citations.table.js
new file mode 100644
index 00000000..f65998aa
--- /dev/null
+++ b/client/table/citations.table.js
@@ -0,0 +1,66 @@
+import React, { Component } from 'react'
+import { bindActionCreators } from 'redux'
+import { connect } from 'react-redux'
+import { ReactTabulator } from 'react-tabulator'
+import { Loader } from '../common'
+import { toArray, toTuples } from '../util'
+
+export const citationsColumns = [
+ { title: 'Title', field: 'title', sorter: 'string' },
+ { title: 'Institution', field: 'institution', sorter: 'string' },
+ { title: 'Country', field: 'country', sorter: 'string', width: 140 },
+ { title: 'Year', field: 'year', sorter: 'number', width: 70 },
+ { title: 'PDF', field: 'pdf_text', formatter: 'link',
+ formatterParams: { target: "_blank", urlField: 'pdf_link', },
+ sorter: 'string', width: 100 },
+]
+
+class CitationsTable extends Component {
+ render() {
+ const { payload } = this.props
+ const { paper, citations } = payload.data
+
+ if (!citations.length) return <Loader />
+
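+    // Convert each Semantic Scholar citation record into a row object for Tabulator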
+ const formattedCitations = citations.map(citation => {
+ const pdf_link = (citation.pdf && citation.pdf.length)
+ ? citation.pdf[0]
+ : (citation.doi && citation.doi.length)
+ ? citation.doi[0]
+ : 'https://www.semanticscholar.org/paper/' + citation.id
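+      // For display, shorten the link to its domain (e.g. https://arxiv.org/pdf/123456 -> arxiv.org),
+      // keeping a third label when the second-level domain is a two-letter code (e.g. ac.uk)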
+ let pdf_text
+ const pdf_partz = pdf_link.split('/')[2].split('.')
+ if (pdf_partz.length > 2 && pdf_partz[pdf_partz.length - 2].length == 2) {
+ pdf_text = pdf_partz.slice(-3).join('.')
+ } else {
+ pdf_text = pdf_partz.slice(-2).join('.')
+ }
+ return {
+ title: citation.title,
+ institution: citation.addresses[0].name,
+ country: citation.addresses[0].country,
+ year: citation.year,
+ pdf_link, pdf_text,
+ }
+ })
+
+ // console.log(formattedCitations)
+
+ return (
+ <ReactTabulator
+ columns={citationsColumns}
+ data={formattedCitations}
+ options={{
+ height: 311,
+ layout: 'fitColumns',
+ placeholder: 'No Data Set',
+ }}
+ />
+ )
+ }
+}
+
+export default CitationsTable
diff --git a/client/table/file.table.js b/client/table/file.table.js
new file mode 100644
index 00000000..92f5cf72
--- /dev/null
+++ b/client/table/file.table.js
@@ -0,0 +1,62 @@
+import React, { Component } from 'react'
+import { bindActionCreators } from 'redux'
+import { connect } from 'react-redux'
+import { ReactTabulator } from 'react-tabulator'
+
+import { Loader } from '../common'
+
+import csv from 'parse-csv'
+
+class FileTable extends Component {
+ state = {
+ data: []
+ }
+
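+  // Fetch the CSV file at payload.url and parse it into row objects for the table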
+ componentDidMount() {
+    const { payload } = this.props
+ fetch(payload.url, { mode: 'cors' })
+ .then(r => r.text())
+ .then(text => {
+ try {
+ const data = csv.toJSON(text, { headers: { included: true } })
+ this.setState({ data })
+ } catch (e) {
+ console.error("error making json:", payload.url)
+ console.error(e)
+ }
+ })
+ }
+
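+  // Build Tabulator column definitions from the comma-separated field names
+  // listed in payload.fields[0]; each column defaults to a string sorter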
+ getColumns(payload) {
+ let { cmd, url, fields } = payload
+ return ((fields && fields.length) ? fields[0] : '').split(', ').map(field => {
+ switch (field) {
+ default:
+ return { title: field, field: field.toLowerCase(), sorter: 'string' }
+ }
+ })
+ }
+
+ render() {
+ const { payload } = this.props
+    const columns = this.getColumns(payload)
+ if (!this.state.data.length) {
+ return <Loader />
+ }
+ return (
+ <ReactTabulator
+        columns={columns}
+ data={this.state.data}
+ options={{
+ height: 311,
+ layout: 'fitColumns',
+ placeholder: 'No Data Set',
+ }}
+ />
+ )
+ }
+}
+export default FileTable
diff --git a/client/table/index.js b/client/table/index.js
new file mode 100644
index 00000000..c741f33e
--- /dev/null
+++ b/client/table/index.js
@@ -0,0 +1,11 @@
+import 'react-tabulator/lib/styles.css'
+import 'react-tabulator/lib/css/tabulator_midnight.css'
+import './tabulator.css'
+
+import CitationsTable from './citations.table'
+import FileTable from './file.table'
+
+export {
+ CitationsTable,
+ FileTable,
+}
\ No newline at end of file
diff --git a/client/table/tabulator.css b/client/table/tabulator.css
new file mode 100644
index 00000000..24005368
--- /dev/null
+++ b/client/table/tabulator.css
@@ -0,0 +1,27 @@
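+/* Local overrides applied on top of the react-tabulator midnight theme (imported in table/index.js) */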
+.tabulator {
+ border-left: 1px solid #333;
+ border-bottom: 1px solid #333;
+}
+.tabulator-row.tabulator-row-odd {
+ background-color: #222;
+}
+.tabulator-row.tabulator-row-even {
+ background-color: #333;
+}
+.desktop .tabulator-row.tabulator-selectable:hover {
+ background-color: #555;
+}
+.tabulator-row .tabulator-cell {
+ border-right: 1px solid #444;
+ padding: 8px;
+}
+.tabulator .tabulator-header {
+ border-bottom: 0;
+}
+.tabulator .tabulator-header .tabulator-col {
+ border-right: 1px solid #444;
+}
+.tabulator .tabulator-tableHolder .tabulator-table {
+ background-color: #333;
+}
\ No newline at end of file
diff --git a/client/tables.js b/client/tables.js
deleted file mode 100644
index 3b53b5db..00000000
--- a/client/tables.js
+++ /dev/null
@@ -1,96 +0,0 @@
-import Tabulator from 'tabulator-tables'
-import csv from 'parse-csv'
-
-const datasetColumns = [
- { title: 'Title', field: 'title', sorter: 'string' },
- { title: 'Images', field: 'images', sorter: 'number' },
- { title: 'People', field: 'people', sorter: 'number' },
- { title: 'Year', field: 'year', sorter: 'number' },
- { title: 'Citations', field: 'citations', sorter: 'number' },
- { title: 'Influenced', field: 'influenced', sorter: 'number' },
- // { title: 'Origin', field: 'origin', sorter: 'string' },
-]
-const citationsColumns = [
- { title: 'Title', field: 'title', sorter: 'string' },
- { title: 'Institution', field: 'institution', sorter: 'string' },
- { title: 'Country', field: 'country', sorter: 'string', width: 140 },
- { title: 'Year', field: 'year', sorter: 'number', width: 70 },
- { title: 'PDF', field: 'pdf', formatter: 'link',
- formatterParams: { target: "_blank", urlField: 'pdf', },
- sorter: 'string', width: 100 },
-]
-
-function getColumns(payload) {
- let { cmd, url, fields } = payload
- if (cmd === 'citations') {
- return citationsColumns
- }
- if (url && url.match('datasets.csv')) {
- return datasetColumns
- }
- return ((fields && fields.length) ? fields[0] : '').split(', ').map(field => {
- switch (field) {
- default:
- return { title: field, field: field.toLowerCase(), sorter: 'string' }
- }
- })
-}
-
-function getCitations(dataset) {
- // console.log(dataset.citations)
- // console.log(dataset.citations.map(d => [d.pdf, d.doi]))
- return dataset.citations.map(citation => ({
- title: citation.title,
- institution: citation.addresses[0].name,
- country: citation.addresses[0].country,
- year: citation.year,
- pdf: (citation.pdf && citation.pdf.length)
- ? citation.pdf[0]
- : (citation.doi && citation.doi.length)
- ? citation.doi[0]
- : "",
- }))
-}
-
-export default function append(el, payload) {
- const columns = getColumns(payload)
- // console.log(columns)
- const table = new Tabulator(el, {
- height: '311px',
- layout: 'fitColumns',
- placeholder: 'No Data Set',
- columns,
- })
- // let path = payload.opt
- // console.log(path, columns)
-
- if (payload.cmd === 'citations') {
- let { data } = payload
- if (!data) return null
- const citations = getCitations(data)
- // console.log(citations)
- table.setData(citations)
- el.classList.add('loaded')
- } else {
- fetch(payload.url, { mode: 'cors' })
- .then(r => r.text())
- .then(text => {
- try {
- // console.log(text)
- const data = csv.toJSON(text, { headers: { included: true } })
- // console.log(data)
- table.setData(data)
- el.classList.add('loaded')
- } catch (e) {
-
- console.error("error making json:", payload.url)
- console.error(e)
- // console.log(text)
- }
- })
- }
-
- // if (fields && fields.length > 1 && fields[1].indexOf('filter')) {
- // const filter = fields[1].split(' ')
- // }
-}
diff --git a/package-lock.json b/package-lock.json
index 6d36e3ff..4e9d6fac 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -216,6 +216,28 @@
"resolved": "https://registry.npmjs.org/@types/node/-/node-8.10.38.tgz",
"integrity": "sha512-EibsnbJerd0hBFaDjJStFrVbVBAtOy4dgL8zZFw0uOvPqzBAX59Ci8cgjg3+RgJIWhsB5A4c+pi+D4P9tQQh/A=="
},
+ "@types/prop-types": {
+ "version": "15.7.0",
+ "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.0.tgz",
+ "integrity": "sha512-eItQyV43bj4rR3JPV0Skpl1SncRCdziTEK9/v8VwXmV6d/qOUO8/EuWeHBbCZcsfSHfzI5UyMJLCSXtxxznyZg=="
+ },
+ "@types/react": {
+ "version": "16.8.10",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-16.8.10.tgz",
+ "integrity": "sha512-7bUQeZKP4XZH/aB4i7k1i5yuwymDu/hnLMhD9NjVZvQQH7ZUgRN3d6iu8YXzx4sN/tNr0bj8jgguk8hhObzGvA==",
+ "requires": {
+ "@types/prop-types": "*",
+ "csstype": "^2.2.0"
+ }
+ },
+ "@types/react-tag-autocomplete": {
+ "version": "5.6.0",
+ "resolved": "https://registry.npmjs.org/@types/react-tag-autocomplete/-/react-tag-autocomplete-5.6.0.tgz",
+ "integrity": "sha512-EsUrbpKW5agXs/NbMUQRgwtZInQbUIIPBXiUz+XcJeUP7U6BRCWjw96sQmsEPRUwO0CdPfQEd82zwpCIGEr4Ew==",
+ "requires": {
+ "@types/react": "*"
+ }
+ },
"accepts": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
@@ -2249,6 +2271,11 @@
"integrity": "sha1-yBSQPkViM3GgR3tAEJqq++6t27Q=",
"dev": true
},
+ "csstype": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.3.tgz",
+ "integrity": "sha512-rINUZXOkcBmoHWEyu7JdHu5JMzkGRoMX4ov9830WNgxf5UYxcBUO0QTKAqeJ5EZfSdlrcJYkC8WwfVW7JYi4yg=="
+ },
"csv-parse": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/csv-parse/-/csv-parse-4.2.0.tgz",
@@ -4647,6 +4674,11 @@
"wbuf": "^1.1.0"
}
},
+ "html-attributes": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/html-attributes/-/html-attributes-1.1.0.tgz",
+ "integrity": "sha1-ggJ6T6x6YHDqbBjMOIauoY1t6gk="
+ },
"html-entities": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.2.1.tgz",
@@ -5594,9 +5626,9 @@
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
},
"js-yaml": {
- "version": "3.12.0",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz",
- "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==",
+ "version": "3.13.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.0.tgz",
+ "integrity": "sha512-pZZoSxcCYco+DIKBTimr67J6Hy+EYGZDY/HCWC+iAEA9h1ByhMXAIVUXMcMFpOCxQ/xjXmPI2MkDL5HRm5eFrQ==",
"dev": true,
"requires": {
"argparse": "^1.0.7",
@@ -5781,6 +5813,11 @@
"resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz",
"integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk="
},
+ "lodash.isplainobject": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
+ "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs="
+ },
"lodash.merge": {
"version": "4.6.1",
"resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.1.tgz",
@@ -6710,6 +6747,16 @@
"sha.js": "^2.4.8"
}
},
+ "pick-react-known-prop": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/pick-react-known-prop/-/pick-react-known-prop-0.1.5.tgz",
+ "integrity": "sha512-SnDf64AVdvqoAFpHeZUKT9kdn40Ellj84CPALRxYWqNJ6r6f44eAAT+Jtkb0Suhiw7yg5BdOFAQ25OJnjG+afw==",
+ "requires": {
+ "html-attributes": "^1.1.0",
+ "lodash.isplainobject": "^4.0.6",
+ "svg-attributes": "^1.0.0"
+ }
+ },
"pify": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
@@ -7274,6 +7321,36 @@
"spin.js": "^2.0.1"
}
},
+ "react-tabulator": {
+ "version": "0.9.1",
+ "resolved": "https://registry.npmjs.org/react-tabulator/-/react-tabulator-0.9.1.tgz",
+ "integrity": "sha512-KLkO17TZbGKzwaCPD8c84cG94OkSpU0zyvlhOleKJELQWcHEL99+63DEamEaXOsguDfxM474lxu3K+jqG2bW/Q==",
+ "requires": {
+ "@types/react-tag-autocomplete": "^5.6.0",
+ "date-fns": "v2.0.0-alpha.25",
+ "dotenv": "^6.1.0",
+ "pick-react-known-prop": "^0.1.5",
+ "react-tag-autocomplete": "^5.7.1",
+ "tabulator-tables": "^4.2.3"
+ },
+ "dependencies": {
+ "date-fns": {
+ "version": "2.0.0-alpha.25",
+ "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.0.0-alpha.25.tgz",
+ "integrity": "sha512-iQzJkHF0L4wah9Ae9PkvwemwFz6qmRLuNZcghmvf2t+ptLs1qXzONLiGtjmPQzL6+JpC01JjlTopY2AEy4NFAg=="
+ },
+ "tabulator-tables": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/tabulator-tables/-/tabulator-tables-4.2.3.tgz",
+ "integrity": "sha512-vMQ/8/HSKzOdn1zd9uv7EmnBnMTlX8JMhfxAxEUkM12qYiqhapWp/iN2ErtDX2cVi+4CUaEn61qydSFJyKjdYA=="
+ }
+ }
+ },
+ "react-tag-autocomplete": {
+ "version": "5.8.2",
+ "resolved": "https://registry.npmjs.org/react-tag-autocomplete/-/react-tag-autocomplete-5.8.2.tgz",
+ "integrity": "sha512-GkOQrSLjvWo98IeqRuGgc77zaxSMyMjy+b2Rc+m9jMKTWopF9h5Lf2F/X1oK9hcnUCeUmJ5QVpc/dx9MgOA2Iw=="
+ },
"read-pkg": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz",
@@ -8546,7 +8623,7 @@
},
"sprintf-js": {
"version": "1.0.3",
- "resolved": "http://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
"integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
"dev": true
},
@@ -8722,6 +8799,11 @@
"integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
"dev": true
},
+ "svg-attributes": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/svg-attributes/-/svg-attributes-1.0.0.tgz",
+ "integrity": "sha1-tcWWjzYke32+OFMgfyqcaK2Aa/w="
+ },
"svgtodatauri": {
"version": "0.0.0",
"resolved": "https://registry.npmjs.org/svgtodatauri/-/svgtodatauri-0.0.0.tgz",
diff --git a/package.json b/package.json
index 4cd2f10d..6238e7e3 100644
--- a/package.json
+++ b/package.json
@@ -55,6 +55,7 @@
"react-router": "^4.3.1",
"react-router-dom": "^4.3.1",
"react-spin": "^0.6.2",
+ "react-tabulator": "^0.9.1",
"redux": "^4.0.0",
"redux-thunk": "^2.3.0",
"snapsvg": "^0.5.1",
diff --git a/site/includes/citations.html b/site/includes/citations.html
index 74ac5cdc..32558d4a 100644
--- a/site/includes/citations.html
+++ b/site/includes/citations.html
@@ -2,10 +2,10 @@
<h3>Dataset Citations</h3>
<p>
- The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website that aggregates and indexes research papers. Each citation has been geocoded using names of institutions found in the PDF front matter, or as listed on other resources then manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/50_people_one_question/index.html b/site/public/datasets/50_people_one_question/index.html
index 9ed6d11e..988ce2dc 100644
--- a/site/public/datasets/50_people_one_question/index.html
+++ b/site/public/datasets/50_people_one_question/index.html
@@ -82,10 +82,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html
index fb08c168..20f2f096 100644
--- a/site/public/datasets/brainwash/index.html
+++ b/site/public/datasets/brainwash/index.html
@@ -107,10 +107,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/celeba/index.html b/site/public/datasets/celeba/index.html
index a0f0c683..07522561 100644
--- a/site/public/datasets/celeba/index.html
+++ b/site/public/datasets/celeba/index.html
@@ -82,10 +82,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/cofw/index.html b/site/public/datasets/cofw/index.html
index 640eab52..99d4a9ef 100644
--- a/site/public/datasets/cofw/index.html
+++ b/site/public/datasets/cofw/index.html
@@ -92,10 +92,10 @@ To increase the number of training images, and since COFW has the exact same la
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html
index 160888e2..431cf7ff 100644
--- a/site/public/datasets/duke_mtmc/index.html
+++ b/site/public/datasets/duke_mtmc/index.html
@@ -103,10 +103,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/hrt_transgender/index.html b/site/public/datasets/hrt_transgender/index.html
index 8b870128..7e10c2fb 100644
--- a/site/public/datasets/hrt_transgender/index.html
+++ b/site/public/datasets/hrt_transgender/index.html
@@ -102,10 +102,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html
index f618e86b..03b38f8a 100644
--- a/site/public/datasets/index.html
+++ b/site/public/datasets/index.html
@@ -85,6 +85,18 @@
</div>
</a>
+ <a href="/datasets/msceleb/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/msceleb/assets/index.jpg)">
+ <div class="dataset">
+ <span class='title'>MS Celeb</span>
+ <div class='fields'>
+ <div class='year visible'><span>2016</span></div>
+ <div class='purpose'><span>face recognition</span></div>
+ <div class='images'><span>1,000,000 images</span></div>
+ <div class='identities'><span>100,000 </span></div>
+ </div>
+ </div>
+ </a>
+
<a href="/datasets/pipa/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/pipa/assets/index.jpg)">
<div class="dataset">
<span class='title'>People in Photo Albums</span>
diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html
index dd94c8c5..9cbf2e11 100644
--- a/site/public/datasets/lfw/index.html
+++ b/site/public/datasets/lfw/index.html
@@ -118,10 +118,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/market_1501/index.html b/site/public/datasets/market_1501/index.html
index b3170925..b7e68c47 100644
--- a/site/public/datasets/market_1501/index.html
+++ b/site/public/datasets/market_1501/index.html
@@ -80,10 +80,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/msceleb/index.html b/site/public/datasets/msceleb/index.html
new file mode 100644
index 00000000..50788aad
--- /dev/null
+++ b/site/public/datasets/msceleb/index.html
@@ -0,0 +1,143 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MS Celeb is a dataset of web images used for training and evaluating face recognition algorithms" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/tabulator.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+ <link rel='stylesheet' href='/assets/css/leaflet.css' />
+ <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ </a>
+ <div class='links'>
+ <a href="/datasets/">Datasets</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content content-dataset">
+
+ <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/msceleb/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>MS Celeb is a dataset of web images used for training and evaluating face recognition algorithms</span></div><div class='hero_subdesc'><span class='bgpad'>The MS Celeb dataset includes over 10,000,000 images and 93,000 identities of semi-public figures collected using the Bing search engine
+</span></div></div></section><section><div class='left-sidebar'><div class='meta'><div><div class='gray'>Published</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Faces</div><div>TBD</div></div><div><div class='gray'>Created by</div><div>TBD</div></div></div></div><h2>Microsoft Celeb Dataset (MS Celeb)</h2>
+<p>(PAGE UNDER DEVELOPMENT)</p>
+<p>At vero eos et accusamus et iusto odio dignissimos ducimus, qui blanditiis praesentium voluptatum deleniti atque corrupti, quos dolores et quas molestias excepturi sint, obcaecati cupiditate non-provident, similique sunt in culpa, qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio.</p>
+<p>Nam libero tempore, cum soluta nobis est eligendi optio, cumque nihil impedit, quo minus id, quod maxime placeat, facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet, ut et voluptates repudiandae sint et molestiae non-recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat</p>
+</section><section>
+  <h3>Who used MS Celeb?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where citations originated. Mouse over individual columns
+ to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section><section>
+ <p>
+ These pie charts show overall totals based on country and institution type.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section><section>
+
+ <h3>Information Supply Chain</h3>
+<!--
+ <div class="map-sidebar right-sidebar">
+ <h3>Legend</h3>
+ <ul>
+ <li><span style="color: #f2f293">&#9632;</span> Industry</li>
+ <li><span style="color: #f30000">&#9632;</span> Academic</li>
+ <li><span style="color: #3264f6">&#9632;</span> Government</li>
+ </ul>
+ </div>
+ -->
+ <p>
+    To understand how MS Celeb has affected global research on computer vision, surveillance, defense, and consumer technology,
+    this map shows the locations of each organization that used or referenced the dataset.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Industry</li>
+ <li class="gov">Government / Military</li>
+ <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
+ </ul>
+</div>
+
+<section>
+ <p class='subp'>
+ [section under development] MsCeleb ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
+ </p>
+</section><section><p>Add more analysis here</p>
+</section><section>
+
+
+ <div class="hr-wave-holder">
+ <div class="hr-wave-line hr-wave-line1"></div>
+ <div class="hr-wave-line hr-wave-line2"></div>
+ </div>
+
+ <h2>Supplementary Information</h2>
+</section><section class="applet_container">
+
+ <h3>Citations</h3>
+ <p>
+ Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+ <p>
+ Add [button/link] to download CSV. Add search input field to filter.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><h3>Additional Information</h3>
+<ul>
+<li>The dataset author spoke about his research at the CVPR conference in 2016 <a href="https://www.youtube.com/watch?v=Nl2fBKxwusQ">https://www.youtube.com/watch?v=Nl2fBKxwusQ</a></li>
+</ul>
+</section><section><ul class="footnotes"><li><a name="[^readme]" class="footnote_shim"></a><span class="backlinks"></span><p>"readme.txt" <a href="https://exhibits.stanford.edu/data/catalog/sx925dc9385">https://exhibits.stanford.edu/data/catalog/sx925dc9385</a>.</p>
+</li><li><a name="[^localized_region_context]" class="footnote_shim"></a><span class="backlinks"></span><p>Li, Y. and Dou, Y. and Liu, X. and Li, T. Localized Region Context and Object Feature Fusion for People Head Detection. ICIP16 Proceedings. 2016. Pages 594-598.</p>
+</li><li><a name="[^replacement_algorithm]" class="footnote_shim"></a><span class="backlinks"></span><p>Zhao. X, Wang Y, Dou, Y. A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering.</p>
+</li></ul></section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html>
\ No newline at end of file
diff --git a/site/public/datasets/pipa/index.html b/site/public/datasets/pipa/index.html
index 43b08f17..09baca99 100644
--- a/site/public/datasets/pipa/index.html
+++ b/site/public/datasets/pipa/index.html
@@ -80,10 +80,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html
index 5cea1c6b..ca106022 100644
--- a/site/public/datasets/uccs/index.html
+++ b/site/public/datasets/uccs/index.html
@@ -103,10 +103,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
diff --git a/site/public/datasets/viper/index.html b/site/public/datasets/viper/index.html
index f30caeab..f78d1c04 100644
--- a/site/public/datasets/viper/index.html
+++ b/site/public/datasets/viper/index.html
@@ -105,10 +105,10 @@
<h3>Citations</h3>
<p>
Citations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates
- and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train and/or test machine learning algorithms.
+ and indexes research papers. The citations were geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
<p>
- Add [button/link] to download CSV. Add search input field to filter. Expand number of rows to 10. Reduce URL text to show only the domain (ie https://arxiv.org/pdf/123456 --> arxiv.org)
+ Add [button/link] to download CSV. Add search input field to filter.
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>