 client/index.js                           | 27
 client/map/index.js                       |  2
 client/tables.js                          | 24
 megapixels/app/site/parser.py             |  6
 site/public/datasets/index.html           |  2
 site/public/datasets/lfw/index.html       |  6
 site/public/datasets/vgg_face2/index.html |  2
 site/public/index.html                    |  2
 site/public/test/citations/index.html     |  2
 site/public/test/csv/index.html           |  2
 site/public/test/datasets/index.html      |  2
 site/public/test/face_search/index.html   |  2
 site/public/test/map/index.html           |  2
 site/public/test/name_search/index.html   |  2
 site/public/test/style/index.html         |  4
15 files changed, 55 insertions, 32 deletions
diff --git a/client/index.js b/client/index.js
index 109d2c27..86bc8979 100644
--- a/client/index.js
+++ b/client/index.js
@@ -46,32 +46,47 @@ function appendApplets(applets) {
 
 function runApplets() {
   const applets = toArray(document.querySelectorAll('.applet')).map(el => {
-    console.log(el.dataset.payload)
+    // console.log(el.dataset.payload)
     let payload
     try {
       payload = JSON.parse(el.dataset.payload)
+      console.log(payload)
     } catch (e) {
       return null
     }
-    console.log(payload)
     let cmdPartz = payload.command.split(" ")
     let cmd = cmdPartz.shift()
     let dataset = null
+    let url = null
+    let opt = null
     payload.cmd = cmd
-    payload.parts = cmdPartz
-    if (payload.parts.length) {
-      dataset = payload.parts[0].trim()
+    payload.partz = cmdPartz
+    if (payload.partz.length) {
+      opt = payload.partz.shift().trim()
+      if (opt.indexOf('http') === 0) {
+        dataset = null
+        url = opt
+      } else if (opt.indexOf('assets') === 0) {
+        url = 'https://nyc3.digitaloceanspaces.com/megapixels/v1' + window.location.pathname + opt
+        dataset = null
+        // console.log(url)
+      } else {
+        dataset = opt
+        url = null
+      }
     }
-    if (!dataset) {
+    if (!dataset && !url) {
       const path = window.location.pathname.split('/').filter(s => !!s)
       if (path.length) {
         dataset = path[path.length - 1]
+        // console.log('dataset from path:', dataset)
       } else {
         console.log('couldnt determine citations dataset')
         return null
       }
     }
     payload.dataset = dataset
+    payload.url = url
     return [el, payload]
   }).filter(a => !!a)
   const withDataset = applets.map(a => a[1].dataset ? a[1] : null).filter(a => !!a)
diff --git a/client/map/index.js b/client/map/index.js
index 15153a90..788894f9 100644
--- a/client/map/index.js
+++ b/client/map/index.js
@@ -2,7 +2,7 @@ import L from 'leaflet'
 import './leaflet.bezier'
 
 function getCitations(dataset) {
-  console.log(dataset.citations)
+  // console.log(dataset.citations)
   return dataset.citations.map(c => ({
     title: c[0],
     location: c[2],
diff --git a/client/tables.js b/client/tables.js
index 4a6c666f..a30abc32 100644
--- a/client/tables.js
+++ b/client/tables.js
@@ -16,13 +16,14 @@ const citationsColumns = [
 ]
 
 function getColumns(payload) {
-  if (payload.cmd === 'citations') {
+  let { cmd, url, fields } = payload
+  if (cmd === 'citations') {
     return citationsColumns
   }
-  if (payload.opt.match('datasets.csv')) {
+  if (url && url.match('datasets.csv')) {
     return datasetColumns
   }
-  return (payload.fields || '').split(', ').map(field => {
+  return ((fields && fields.length) ? fields[0] : '').split(', ').map(field => {
     switch (field) {
       default:
         return { title: field, field: field.toLowerCase(), sorter: 'string' }
@@ -39,8 +40,8 @@ function getCitations(dataset) {
 }
 
 export default function append(el, payload) {
-  let url = payload.opt
   const columns = getColumns(payload)
+  console.log(columns)
   const table = new Tabulator(el, {
     height: '311px',
     layout: 'fitColumns',
@@ -50,18 +51,25 @@ export default function append(el, payload) {
 
   // let path = payload.opt
   // console.log(path, columns)
+  console.log(payload.cmd, payload.url, payload.dataset)
   if (payload.cmd === 'citations') {
     let { data } = payload
     const citations = getCitations(data)
     console.log(citations)
     table.setData(citations)
   } else {
-    fetch(url, { mode: 'cors' })
+    fetch(payload.url, { mode: 'cors' })
       .then(r => r.text())
       .then(text => {
-        const data = csv.toJSON(text, { headers: { included: true } })
-        console.log(data)
-        table.setData(data)
+        try {
+          const data = csv.toJSON(text, { headers: { included: true } })
+          // console.log(data)
+          table.setData(data)
+        } catch (e) {
+          console.error("error parsing json:", payload.url)
+          console.error(e)
+          // console.log(text)
+        }
       })
   }
 }
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index ff209793..ecfae0cb 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -73,9 +73,9 @@ def format_applet(section, s3_path):
     if command == 'load_file':
         if opt[0:4] != 'http':
             applet['opt'] = s3_path + opt
-        if len(payload) > 1:
-            applet['fields'] = payload[1]
-    return "<section class='applet_container'><div class='applet' data-payload='{}'></div></section>".format(className, json.dumps(applet))
+    if len(payload) > 1:
+        applet['fields'] = payload[1:]
+    return "<section class='applet_container'><div class='applet' data-payload='{}'></div></section>".format(json.dumps(applet))
 
 def parse_markdown(sections, s3_path, skip_h1=False):
     """
diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html
index 4a0f32eb..bcc7c1ab 100644
--- a/site/public/datasets/index.html
+++ b/site/public/datasets/index.html
@@ -31,7 +31,7 @@
 <section><h1>Facial Recognition Datasets</h1>
 <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p>
 <h3>Summary</h3>
-</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
+</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
 </div>
 
 <footer>
diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html
index 84028a3b..9adf29b1 100644
--- a/site/public/datasets/lfw/index.html
+++ b/site/public/datasets/lfw/index.html
@@ -30,7 +30,7 @@
 <section><h1>Labeled Faces in the Wild</h1>
 </section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section><p>Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.</p>
-</section><section><div class='applet' data-payload='{"command": "face_search"}'></div></section><section><div class='applet' data-payload='{"command": "name_search"}'></div></section><section><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv"}'></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg' alt='Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><h2>Intro</h2>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv", "fields": ["Name, Images, Gender, Description"]}'></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg' alt='Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><h2>Intro</h2>
 <p>Three paragraphs describing the LFW dataset in a format that can be easily replicated for the other datasets. Nothing too custom. An analysis of the initial research papers with context relative to all the other dataset papers.</p>
 </section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_everyone_nocrop_1920.jpg' alt=' all 5,749 people in the LFW Dataset sorted from most to least images collected.'><div class='caption'> all 5,749 people in the LFW Dataset sorted from most to least images collected.</div></div></section><section><h2>LFW by the Numbers</h2>
 <ul>
@@ -67,7 +67,7 @@
 <p>According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
 <p>According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."</p>
 <p>In addition to commercial use as an evaluation tool, alll of the faces in LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p>
-</section><section><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv"}'></div></section><section><table>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section><table>
 <thead><tr>
 <th style="text-align:left">Company</th>
 <th style="text-align:left">Country</th>
@@ -100,7 +100,7 @@
 <h2>Citations</h2>
 <p>Overall, LFW has at least 456 citations from 123 countries. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p>
 <p>Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_graph.jpg' alt='Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset'><div class='caption'>Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_map.jpg' alt='Geographic distributions of citations for the LFW Dataset'><div class='caption'>Geographic distributions of citations for the LFW Dataset</div></div></section><section><h2>Conclusion</h2>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_graph.jpg' alt='Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset'><div class='caption'>Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset</div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "map"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h2>Conclusion</h2>
 <p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p>
 <p>For all the 5,000 people in this datasets, their face is forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For their rest of the lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.</p>
 <h2>Right to Removal</h2>
diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html
index 90216add..63715a4f 100644
--- a/site/public/datasets/vgg_face2/index.html
+++ b/site/public/datasets/vgg_face2/index.html
@@ -30,7 +30,7 @@
 <section><h1>VGG Faces2</h1>
 </section><section><div class='meta'><div><div class='gray'>Created</div><div>2018</div></div><div><div class='gray'>Images</div><div>3.3M</div></div><div><div class='gray'>People</div><div>9,000</div></div><div><div class='gray'>Created From</div><div>Scraping search engines</div></div><div><div class='gray'>Search available</div><div>[Searchable](#)</div></div></div></section><section><p>VGG Face2 is the updated version of the VGG Face dataset and now includes over 3.3M face images from over 9K people. The identities were selected by taking the top 500K identities in Google's Knowledge Graph of celebrities and then selecting only the names that yielded enough training images. The dataset was created in the UK but funded by Office of Director of National Intelligence in the United States.</p>
-</section><section><div class='applet' data-payload='{"command": "face_search"}'></div></section><section><div class='applet' data-payload='{"command": "name_search"}'></div></section><section><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv"}'></div></section><section><h2>VGG Face2 by the Numbers</h2>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv", "fields": ["Name, Images, Gender, Description"]}'></div></section><section><h2>VGG Face2 by the Numbers</h2>
 <ul>
 <li>1,331 actresses, 139 presidents</li>
 <li>3 husbands and 16 wives</li>
diff --git a/site/public/index.html b/site/public/index.html
index 688eb700..6db6ccb7 100644
--- a/site/public/index.html
+++ b/site/public/index.html
@@ -51,7 +51,7 @@
 <section><h2>Facial Recognition Datasets</h2>
 <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p>
 <h3>Summary</h3>
-</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
+</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
 </div>
 
diff --git a/site/public/test/citations/index.html b/site/public/test/citations/index.html
index cb5aa92f..c2bed996 100644
--- a/site/public/test/citations/index.html
+++ b/site/public/test/citations/index.html
@@ -30,7 +30,7 @@
 <section><h1>Citations</h1>
 <h2><a href="/test/">← Back to test index</a></h2>
-</section><section><div class='applet' data-payload='{"command": "citations lfw"}'></div></section>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "citations lfw"}'></div></section>
 </div>
 
 <footer>
diff --git a/site/public/test/csv/index.html b/site/public/test/csv/index.html
index 52c87d4d..e53c1421 100644
--- a/site/public/test/csv/index.html
+++ b/site/public/test/csv/index.html
@@ -30,7 +30,7 @@
 <section><h1>CSV Test</h1>
 <h2><a href="/test/">← Back to test index</a></h2>
-</section><section><div class='applet' data-payload='{"command": "load_file /datasets/lfw/assets/lfw_names_gender_kg_min.csv"}'></div></section>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file /datasets/lfw/assets/lfw_names_gender_kg_min.csv", "fields": ["Name, Images, Gender, Description"]}'></div></section>
 </div>
 
 <footer>
diff --git a/site/public/test/datasets/index.html b/site/public/test/datasets/index.html
index 1554ae2b..421ecb97 100644
--- a/site/public/test/datasets/index.html
+++ b/site/public/test/datasets/index.html
@@ -30,7 +30,7 @@
 <section><h1>Index of datasets</h1>
 <h2><a href="/test/">← Back to test index</a></h2>
-</section><section><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
 </div>
 
 <footer>
diff --git a/site/public/test/face_search/index.html b/site/public/test/face_search/index.html
index 43703ed5..1823318d 100644
--- a/site/public/test/face_search/index.html
+++ b/site/public/test/face_search/index.html
@@ -30,7 +30,7 @@
 <section><h1>Face search</h1>
 <h2><a href="/test/">← Back to test index</a></h2>
-</section><section><div class='applet' data-payload='{"command": "face_search lfw"}'></div></section>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search lfw"}'></div></section>
 </div>
 
 <footer>
diff --git a/site/public/test/map/index.html b/site/public/test/map/index.html
index bdec69f5..c1f67471 100644
--- a/site/public/test/map/index.html
+++ b/site/public/test/map/index.html
@@ -30,7 +30,7 @@
 <section><h1>Map test</h1>
 <h2><a href="/test/">← Back to test index</a></h2>
-</section><section><div class='applet' data-payload='{"command": "map lfw"}'></div></section>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "map lfw"}'></div></section>
 </div>
 
 <footer>
diff --git a/site/public/test/name_search/index.html b/site/public/test/name_search/index.html
index a23aa53d..db38ba04 100644
--- a/site/public/test/name_search/index.html
+++ b/site/public/test/name_search/index.html
@@ -30,7 +30,7 @@
 <section><h1>Name search</h1>
 <h2><a href="/test/">← Back to test index</a></h2>
-</section><section><div class='applet' data-payload='{"command": "name_search lfw"}'></div></section>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search lfw"}'></div></section>
 </div>
 
 <footer>
diff --git a/site/public/test/style/index.html b/site/public/test/style/index.html
index 3d14bd77..3ef7d918 100644
--- a/site/public/test/style/index.html
+++ b/site/public/test/style/index.html
@@ -54,10 +54,10 @@
 <div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Person 3. Let me tell you about Person 3. This person has a very long description with text which wraps like crazy'><div class='caption'>Person 3. Let me tell you about Person 3. This person has a very long description with text which wraps like crazy</div></div></section><section><blockquote><p>est, qui dolorem ipsum, quia dolor sit amet consectetur adipisci[ng] velit, sed quia non-numquam [do] eius modi tempora inci[di]dunt, ut labore et dolore magnam aliquam quaerat voluptatem.</p>
 </blockquote>
 </section><section class='wide'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/wide-test.jpg' alt='This image is extremely wide and the text beneath it will wrap but thats fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam'><div class='caption'>This image is extremely wide and the text beneath it will wrap but that's fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam</div></div></section><section><p>Inline <code>code</code> has <code>back-ticks around</code> it.</p>
-</section><section><div class='applet' data-payload='{"command": "javascript"}'></div></section><section><pre><code class="lang-python">s = "Python syntax highlighting"
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "javascript", "fields": ["var s = \"JavaScript syntax highlighting\";", "alert(s);"]}'></div></section><section><pre><code class="lang-python">s = "Python syntax highlighting"
 print(s)
 </code></pre>
-</section><section><div class='applet' data-payload='{"command": "No language indicated, so no syntax highlighting. "}'></div></section><section><p>Horizontal rule</p>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "No language indicated, so no syntax highlighting. ", "fields": ["But let's throw in a <b>tag</b>."]}'></div></section><section><p>Horizontal rule</p>
 <hr>
 <p>Citations below here</p>
 <div class="footnotes">
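
Note on the payload handling in client/index.js above: after this change an applet's data-payload resolves to either a dataset slug or a fetchable URL, never both. The sketch below restates that branch logic as a standalone function for illustration; the name resolvePayload and the pathname argument are not part of the diff, and this is only a minimal rework assuming the same payload shape.

// Minimal sketch of the new option handling in runApplets() (illustrative only).
function resolvePayload (payload, pathname) {
  const partz = payload.command.split(' ')
  const cmd = partz.shift()
  let dataset = null
  let url = null
  if (partz.length) {
    const opt = partz.shift().trim()
    if (opt.indexOf('http') === 0) {
      url = opt // absolute URL: fetched as-is
    } else if (opt.indexOf('assets') === 0) {
      // page-relative asset: prefix with the Spaces bucket plus the current page path
      url = 'https://nyc3.digitaloceanspaces.com/megapixels/v1' + pathname + opt
    } else {
      dataset = opt // anything else is treated as a dataset slug
    }
  }
  if (!dataset && !url) {
    // fall back to the last path segment of the page URL, e.g. /datasets/lfw/ -> 'lfw'
    const path = pathname.split('/').filter(s => !!s)
    if (path.length) dataset = path[path.length - 1]
  }
  return Object.assign({}, payload, { cmd: cmd, partz: partz, dataset: dataset, url: url })
}

// Example: a load_file applet on /datasets/lfw/
// resolvePayload({ command: 'load_file assets/lfw_names_gender_kg_min.csv' }, '/datasets/lfw/')
//   -> url: 'https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_names_gender_kg_min.csv', dataset: null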

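Note on the fields change in parser.py and client/tables.js above: format_applet now emits applet['fields'] as a list (payload[1:]) rather than a single string, and getColumns reads the first entry and splits it on ', ' to build the Tabulator column definitions. A small sketch of that mapping, assuming the same payload shape as the generated HTML (the variable names below are illustrative, not from the diff):

// Sketch: deriving Tabulator column definitions from a payload's "fields" list.
const payload = {
  command: 'load_file assets/lfw_names_gender_kg_min.csv',
  fields: ['Name, Images, Gender, Description']
}
const fields = payload.fields
const columns = ((fields && fields.length) ? fields[0] : '')
  .split(', ')
  .map(field => ({ title: field, field: field.toLowerCase(), sorter: 'string' }))
// columns -> [{ title: 'Name', field: 'name', sorter: 'string' },
//             { title: 'Images', field: 'images', sorter: 'string' }, ...]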