From 6431d06048791763f3644b3a0457cc9c4f1df6d3 Mon Sep 17 00:00:00 2001
From: Jules Laplace
Date: Sun, 16 Dec 2018 18:11:12 +0100
Subject: csvs and citations

---
 client/index.js                           | 27 +++++++++++++++++++++------
 client/map/index.js                       |  2 +-
 client/tables.js                          | 24 ++++++++++++++++--------
 megapixels/app/site/parser.py             |  6 +++---
 site/public/datasets/index.html           |  2 +-
 site/public/datasets/lfw/index.html       |  6 +++---
 site/public/datasets/vgg_face2/index.html |  2 +-
 site/public/index.html                    |  2 +-
 site/public/test/citations/index.html     |  2 +-
 site/public/test/csv/index.html           |  2 +-
 site/public/test/datasets/index.html      |  2 +-
 site/public/test/face_search/index.html   |  2 +-
 site/public/test/map/index.html           |  2 +-
 site/public/test/name_search/index.html   |  2 +-
 site/public/test/style/index.html         |  4 ++--
 15 files changed, 55 insertions(+), 32 deletions(-)

diff --git a/client/index.js b/client/index.js
index 109d2c27..86bc8979 100644
--- a/client/index.js
+++ b/client/index.js
@@ -46,32 +46,47 @@ function appendApplets(applets) {
 
 function runApplets() {
   const applets = toArray(document.querySelectorAll('.applet')).map(el => {
-    console.log(el.dataset.payload)
+    // console.log(el.dataset.payload)
     let payload
     try {
       payload = JSON.parse(el.dataset.payload)
+      console.log(payload)
     } catch (e) {
       return null
     }
-    console.log(payload)
     let cmdPartz = payload.command.split(" ")
     let cmd = cmdPartz.shift()
     let dataset = null
+    let url = null
+    let opt = null
     payload.cmd = cmd
-    payload.parts = cmdPartz
-    if (payload.parts.length) {
-      dataset = payload.parts[0].trim()
+    payload.partz = cmdPartz
+    if (payload.partz.length) {
+      opt = payload.partz.shift().trim()
+      if (opt.indexOf('http') === 0) {
+        dataset = null
+        url = opt
+      } else if (opt.indexOf('assets') === 0) {
+        url = 'https://nyc3.digitaloceanspaces.com/megapixels/v1' + window.location.pathname + opt
+        dataset = null
+        // console.log(url)
+      } else {
+        dataset = opt
+        url = null
+      }
     }
-    if (!dataset) {
+    if (!dataset && !url) {
       const path = window.location.pathname.split('/').filter(s => !!s)
       if (path.length) {
         dataset = path[path.length - 1]
+        // console.log('dataset from path:', dataset)
       } else {
         console.log('couldnt determine citations dataset')
         return null
       }
     }
     payload.dataset = dataset
+    payload.url = url
     return [el, payload]
   }).filter(a => !!a)
   const withDataset = applets.map(a => a[1].dataset ? a[1] : null).filter(a => !!a)
diff --git a/client/map/index.js b/client/map/index.js
index 15153a90..788894f9 100644
--- a/client/map/index.js
+++ b/client/map/index.js
@@ -2,7 +2,7 @@ import L from 'leaflet'
 import './leaflet.bezier'
 
 function getCitations(dataset) {
-  console.log(dataset.citations)
+  // console.log(dataset.citations)
   return dataset.citations.map(c => ({
     title: c[0],
     location: c[2],
diff --git a/client/tables.js b/client/tables.js
index 4a6c666f..a30abc32 100644
--- a/client/tables.js
+++ b/client/tables.js
@@ -16,13 +16,14 @@ const citationsColumns = [
 ]
 
 function getColumns(payload) {
-  if (payload.cmd === 'citations') {
+  let { cmd, url, fields } = payload
+  if (cmd === 'citations') {
     return citationsColumns
   }
-  if (payload.opt.match('datasets.csv')) {
+  if (url && url.match('datasets.csv')) {
     return datasetColumns
   }
-  return (payload.fields || '').split(', ').map(field => {
+  return ((fields && fields.length) ? fields[0] : '').split(', ').map(field => {
     switch (field) {
       default:
         return { title: field, field: field.toLowerCase(), sorter: 'string' }
@@ -39,8 +40,8 @@ function getCitations(dataset) {
 }
 
 export default function append(el, payload) {
-  let url = payload.opt
   const columns = getColumns(payload)
+  console.log(columns)
   const table = new Tabulator(el, {
     height: '311px',
     layout: 'fitColumns',
@@ -50,18 +51,25 @@
 
   // let path = payload.opt
   // console.log(path, columns)
+  console.log(payload.cmd, payload.url, payload.dataset)
   if (payload.cmd === 'citations') {
     let { data } = payload
     const citations = getCitations(data)
     console.log(citations)
     table.setData(citations)
   } else {
-    fetch(url, { mode: 'cors' })
+    fetch(payload.url, { mode: 'cors' })
       .then(r => r.text())
       .then(text => {
-        const data = csv.toJSON(text, { headers: { included: true } })
-        console.log(data)
-        table.setData(data)
+        try {
+          const data = csv.toJSON(text, { headers: { included: true } })
+          // console.log(data)
+          table.setData(data)
+        } catch (e) {
+          console.error("error parsing json:", payload.url)
+          console.error(e)
+          // console.log(text)
+        }
       })
   }
 }
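Note on the client hunks above: runApplets() now resolves the first token after the applet command into either a dataset slug or a fetchable URL, and tables.js consumes the result as payload.url. The decision order is: absolute http(s) URLs pass through, assets/ paths are rebased onto the DigitalOcean Spaces bucket for the current page, anything else is treated as a dataset name, and a missing option falls back to the last segment of the page path. A minimal sketch of that resolution as a standalone function, assuming a helper name of our own (the prefix checks and base URL are copied from the diff):

    // Sketch only: the option-resolution logic from runApplets(), isolated from the
    // DOM. resolveAppletSource is not a name used by the patch; the 'http'/'assets'
    // prefix checks and the Spaces base URL are taken verbatim from it.
    function resolveAppletSource (opt, pathname) {
      const base = 'https://nyc3.digitaloceanspaces.com/megapixels/v1'
      if (opt && opt.indexOf('http') === 0) {
        return { dataset: null, url: opt }                   // absolute URL, fetch as-is
      }
      if (opt && opt.indexOf('assets') === 0) {
        return { dataset: null, url: base + pathname + opt } // page-relative asset on Spaces
      }
      if (opt) {
        return { dataset: opt, url: null }                   // bare dataset slug
      }
      // no option at all: derive the dataset from the page path, e.g. /datasets/lfw/ -> 'lfw'
      const segments = pathname.split('/').filter(s => !!s)
      return { dataset: segments.length ? segments[segments.length - 1] : null, url: null }
    }

For example, resolveAppletSource('assets/citations.csv', '/datasets/lfw/') returns the Spaces URL for that page's CSV, while resolveAppletSource(null, '/datasets/lfw/') falls back to the 'lfw' dataset.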
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index ff209793..ecfae0cb 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -73,9 +73,9 @@ def format_applet(section, s3_path):
     if command == 'load_file':
         if opt[0:4] != 'http':
             applet['opt'] = s3_path + opt
-    if len(payload) > 1:
-        applet['fields'] = payload[1]
-    return "…".format(className, json.dumps(applet))
+    if len(payload) > 1:
+        applet['fields'] = payload[1:]
+    return "…".format(json.dumps(applet))
 
 def parse_markdown(sections, s3_path, skip_h1=False):
     """

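With the parser.py change above, an applet's fields entry becomes a list (payload[1:]) rather than a single string, which is what the new fields[0] access in client/tables.js expects; the markup of the element returned by format_applet() is stripped in this copy of the patch (hence the elided string), but the JSON it carries in data-payload can be pieced together from both sides of the diff. A rough sketch of one such payload, with assumed example values:

    // Assumed example of an applet data-payload after this patch. command, opt and
    // fields are written by format_applet() in parser.py; cmd, partz, dataset and
    // url are derived from it on the client in runApplets().
    const examplePayload = {
      command: 'load_file assets/citations.csv',  // raw applet command from the markdown source
      opt: 'https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/citations.csv', // s3_path + opt (assumed value)
      fields: ['Title, Year, Citations']          // payload[1:] -> now a list, not a bare string
    }

The citations applets follow the same shape but carry their rows inline as payload.data, which tables.js feeds through getCitations() instead of fetching a CSV.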
diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html
index 4a0f32eb..bcc7c1ab 100644
--- a/site/public/datasets/index.html
+++ b/site/public/datasets/index.html
@@ -31,7 +31,7 @@
 Facial Recognition Datasets
 Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
 Summary
-Found: 275 datasets · Created between: 1993-2018 · Smallest dataset: 20 images · Largest dataset: 10,000,000 images · Highest resolution faces: 450x500 (Unconstrained College Students) · Lowest resolution faces: 16x20 pixels (QMUL SurvFace)
+Found: 275 datasets · Created between: 1993-2018 · Smallest dataset: 20 images · Largest dataset: 10,000,000 images · Highest resolution faces: 450x500 (Unconstrained College Students) · Lowest resolution faces: 16x20 pixels (QMUL SurvFace)
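The site/public/*.html hunks in this patch appear to be regenerated pages picking up the new payload format; the removed and added line pair above reads identically here because the markup that actually changed, presumably the applet data-payload attribute, is stripped in this copy. The behavioral piece those pages exercise is getColumns() in client/tables.js, whose new fallback branch is sketched below in isolation (fieldsToColumns is our name; the fields[0] convention and the column shape are taken from the diff):

    // Sketch: derive Tabulator column definitions from payload.fields, where
    // fields[0] is a comma-separated header string such as 'Title, Year, Citations'.
    function fieldsToColumns (fields) {
      const headers = (fields && fields.length) ? fields[0] : ''
      return headers.split(', ').map(field => (
        { title: field, field: field.toLowerCase(), sorter: 'string' }
      ))
    }

    // fieldsToColumns(['Title, Year, Citations'])
    // -> [ { title: 'Title', field: 'title', sorter: 'string' },
    //      { title: 'Year', field: 'year', sorter: 'string' },
    //      { title: 'Citations', field: 'citations', sorter: 'string' } ]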