From 38746f284b17400d4e2555509ea60df5912b824a Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Fri, 14 Dec 2018 18:10:27 +0100 Subject: all the sql stuff communicating nicely --- megapixels/app/server/api.py | 72 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 megapixels/app/server/api.py (limited to 'megapixels/app/server/api.py') diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py new file mode 100644 index 00000000..e7db11f1 --- /dev/null +++ b/megapixels/app/server/api.py @@ -0,0 +1,72 @@ +from flask import Blueprint, jsonify + +from app.models.sql_factory import list_datasets, get_dataset, get_table + +# from jinja2 import TemplateNotFound + +# import os +# import sys +# import json +# import time +# import argparse +# import cv2 as cv +# import numpy as np +# from datetime import datetime +# from flask import Flask, request, render_template, jsonify +# from PIL import Image # todo: try to remove PIL dependency +# import re + +# sanitize_re = re.compile('[\W]+') +# valid_exts = ['.gif', '.jpg', '.jpeg', '.png'] + +# from dotenv import load_dotenv +# load_dotenv() + +# from feature_extractor import FeatureExtractor + +# DEFAULT_LIMIT = 50 + +api = Blueprint('api', __name__) + +@api.route('/') +def index(): + return jsonify({ 'datasets': list_datasets() }) + +@api.route('/dataset//test', methods=['POST']) +def test(dataset='test'): + dataset = get_dataset(dataset) + print('hiiiiii') + return jsonify({ 'test': 'OK', 'dataset': dataset }) + +# @router.route('//face', methods=['POST']) +# def upload(name): +# file = request.files['query_img'] +# fn = file.filename +# if fn.endswith('blob'): +# fn = 'filename.jpg' + +# basename, ext = os.path.splitext(fn) +# print("got {}, type {}".format(basename, ext)) +# if ext.lower() not in valid_exts: +# return jsonify({ 'error': 'not an image' }) + +# uploaded_fn = datetime.now().isoformat() + "_" + basename +# uploaded_fn = sanitize_re.sub('', uploaded_fn) +# uploaded_img_path = "static/uploaded/" + uploaded_fn + ext +# uploaded_img_path = uploaded_img_path.lower() +# print('query: {}'.format(uploaded_img_path)) + +# img = Image.open(file.stream).convert('RGB') +# # img.save(uploaded_img_path) +# # vec = db.load_feature_vector_from_file(uploaded_img_path) +# vec = fe.extract(img) +# # print(vec.shape) + +# results = db.search(vec, limit=limit) +# query = { +# 'timing': time.time() - start, +# } +# print(results) +# return jsonify({ +# 'results': results, +# }) -- cgit v1.2.3-70-g09d2 From 2ee8cd6a77c3efed77e58d706f4ee76418770e54 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Fri, 14 Dec 2018 18:17:15 +0100 Subject: sub apis workin --- megapixels/app/models/sql_factory.py | 11 +++++++---- megapixels/app/server/api.py | 15 +++++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) (limited to 'megapixels/app/server/api.py') diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py index 2a18d6af..e35c3e15 100644 --- a/megapixels/app/models/sql_factory.py +++ b/megapixels/app/models/sql_factory.py @@ -21,10 +21,7 @@ datasets = {} loaded = False def list_datasets(): - return [{ - 'name': name, - 'tables': list(datasets[name].tables.keys()), - } for name in datasets.keys()] + return [dataset.describe() for dataset in datasets.values()] def get_dataset(name): return datasets[name] if name in datasets else None @@ -76,6 +73,12 @@ class SqlDataset: base_model = declarative_base(engine) self.base_model = base_model + def describe(self): + return 
{ + 'name': self.name, + 'tables': list(self.tables.keys()), + } + def get_table(self, type): if type in self.tables: return self.tables[type] diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index e7db11f1..428c53b1 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -32,11 +32,18 @@ api = Blueprint('api', __name__) def index(): return jsonify({ 'datasets': list_datasets() }) -@api.route('/dataset//test', methods=['POST']) -def test(dataset='test'): - dataset = get_dataset(dataset) +@api.route('/dataset/') +def show(name): + dataset = get_dataset(name) + if dataset: + return jsonify(dataset.describe()) + else: + return jsonify({ 'status': 404 }) + +@api.route('/dataset//test', methods=['POST']) +def test(name): print('hiiiiii') - return jsonify({ 'test': 'OK', 'dataset': dataset }) + return jsonify({ 'test': 'OK', 'dataset': name }) # @router.route('//face', methods=['POST']) # def upload(name): -- cgit v1.2.3-70-g09d2 From 485cf0e4665c660d4e5e1fba00a95bc8036809c6 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sat, 15 Dec 2018 16:40:34 +0100 Subject: db stuff --- faiss/__init__.py | 0 faiss/server.py | 68 ----------------- megapixels/app/server/api.py | 97 +++++++++---------------- site/public/datasets/lfw/index.html | 67 +++++++++++++---- site/public/datasets/vgg_face2/index.html | 84 +++++++++++++++++++++ site/public/index.html | 1 + site/public/research/00_introduction/index.html | 86 ++++++++++++++++++++++ site/public/research/index.html | 2 +- 8 files changed, 259 insertions(+), 146 deletions(-) delete mode 100644 faiss/__init__.py delete mode 100644 faiss/server.py create mode 100644 site/public/datasets/vgg_face2/index.html create mode 100644 site/public/research/00_introduction/index.html (limited to 'megapixels/app/server/api.py') diff --git a/faiss/__init__.py b/faiss/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/faiss/server.py b/faiss/server.py deleted file mode 100644 index a8c660fa..00000000 --- a/faiss/server.py +++ /dev/null @@ -1,68 +0,0 @@ -#!python - -import os -import sys -import json -import time -import argparse -import cv2 as cv -import numpy as np -from datetime import datetime -from flask import Flask, request, render_template, jsonify -from PIL import Image # todo: try to remove PIL dependency -import re - -sanitize_re = re.compile('[\W]+') -valid_exts = ['.gif', '.jpg', '.jpeg', '.png'] - -from dotenv import load_dotenv -load_dotenv() - -from feature_extractor import FeatureExtractor - -DEFAULT_LIMIT = 50 - -app = Flask(__name__, static_url_path="/search/static", static_folder="static") - -# static api routes - this routing is actually handled in the JS -@app.route('/', methods=['GET']) -def index(): - return app.send_static_file('metadata.html') - -# search using an uploaded file -@app.route('/search/api/upload', methods=['POST']) -def upload(): - file = request.files['query_img'] - fn = file.filename - if fn.endswith('blob'): - fn = 'filename.jpg' - - basename, ext = os.path.splitext(fn) - print("got {}, type {}".format(basename, ext)) - if ext.lower() not in valid_exts: - return jsonify({ 'error': 'not an image' }) - - uploaded_fn = datetime.now().isoformat() + "_" + basename - uploaded_fn = sanitize_re.sub('', uploaded_fn) - uploaded_img_path = "static/uploaded/" + uploaded_fn + ext - uploaded_img_path = uploaded_img_path.lower() - print('query: {}'.format(uploaded_img_path)) - - img = Image.open(file.stream).convert('RGB') - # img.save(uploaded_img_path) - # vec = 
db.load_feature_vector_from_file(uploaded_img_path) - vec = fe.extract(img) - # print(vec.shape) - - results = db.search(vec, limit=limit) - query = { - 'timing': time.time() - start, - } - print(results) - return jsonify({ - 'results': results, - }) - -if __name__=="__main__": - app.run("0.0.0.0", debug=False) - diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 428c53b1..c5e27dd2 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -1,30 +1,13 @@ -from flask import Blueprint, jsonify +import os +import re +import time +from flask import Blueprint, request, jsonify +from PIL import Image # todo: try to remove PIL dependency from app.models.sql_factory import list_datasets, get_dataset, get_table -# from jinja2 import TemplateNotFound - -# import os -# import sys -# import json -# import time -# import argparse -# import cv2 as cv -# import numpy as np -# from datetime import datetime -# from flask import Flask, request, render_template, jsonify -# from PIL import Image # todo: try to remove PIL dependency -# import re - -# sanitize_re = re.compile('[\W]+') -# valid_exts = ['.gif', '.jpg', '.jpeg', '.png'] - -# from dotenv import load_dotenv -# load_dotenv() - -# from feature_extractor import FeatureExtractor - -# DEFAULT_LIMIT = 50 +sanitize_re = re.compile('[\W]+') +valid_exts = ['.gif', '.jpg', '.jpeg', '.png'] api = Blueprint('api', __name__) @@ -40,40 +23,32 @@ def show(name): else: return jsonify({ 'status': 404 }) -@api.route('/dataset//test', methods=['POST']) -def test(name): - print('hiiiiii') - return jsonify({ 'test': 'OK', 'dataset': name }) - -# @router.route('//face', methods=['POST']) -# def upload(name): -# file = request.files['query_img'] -# fn = file.filename -# if fn.endswith('blob'): -# fn = 'filename.jpg' - -# basename, ext = os.path.splitext(fn) -# print("got {}, type {}".format(basename, ext)) -# if ext.lower() not in valid_exts: -# return jsonify({ 'error': 'not an image' }) - -# uploaded_fn = datetime.now().isoformat() + "_" + basename -# uploaded_fn = sanitize_re.sub('', uploaded_fn) -# uploaded_img_path = "static/uploaded/" + uploaded_fn + ext -# uploaded_img_path = uploaded_img_path.lower() -# print('query: {}'.format(uploaded_img_path)) - -# img = Image.open(file.stream).convert('RGB') -# # img.save(uploaded_img_path) -# # vec = db.load_feature_vector_from_file(uploaded_img_path) -# vec = fe.extract(img) -# # print(vec.shape) - -# results = db.search(vec, limit=limit) -# query = { -# 'timing': time.time() - start, -# } -# print(results) -# return jsonify({ -# 'results': results, -# }) +@api.route('/dataset//face', methods=['POST']) +def upload(name): + file = request.files['query_img'] + fn = file.filename + if fn.endswith('blob'): + fn = 'filename.jpg' + + basename, ext = os.path.splitext(fn) + print("got {}, type {}".format(basename, ext)) + if ext.lower() not in valid_exts: + return jsonify({ 'error': 'not an image' }) + + img = Image.open(file.stream).convert('RGB') + + # vec = db.load_feature_vector_from_file(uploaded_img_path) + # vec = fe.extract(img) + # print(vec.shape) + # results = db.search(vec, limit=limit) + + query = { + 'timing': time.time() - start, + } + results = [] + + print(results) + return jsonify({ + 'query': query, + 'results': results, + }) diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 39052b44..e080229f 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -26,23 +26,50 @@
-

Labeled Faces in The Wild

-
Created
2007
Images
13,233
People
5,749
Created From
Yahoo News images
Search available
Searchable

Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images that were posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.

+

Labeled Faces in the Wild

+
Created
2007
Images
13,233
People
5,749
Created From
Yahoo News images
Search available
Searchable

Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.

{INSERT IMAGE SEARCH MODULE}

{INSERT TEXT SEARCH MODULE}

-
Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.
Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.

INTRO

-

It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).

-

Since then the LFW dataset has become one of the most widely used datasets used for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times reaching 45 different countries throughout the world.

-

The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.

-

As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces providing the visual dimensions of a new computer vision future.

-
The entire LFW dataset cropped to facial regions
The entire LFW dataset cropped to facial regions

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

-

Facts

-

The person with the most images is: -The person with the least images is:

-

Commercial Use

+
load file: lfw_names_gender_kg_min.csv
+Name, Images, Gender, Description
+
+
Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.
Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.

Intro

+

Three paragraphs describing the LFW dataset in a format that can be easily replicated for the other datasets. Nothing too custom. An analysis of the initial research papers with context relative to all the other dataset papers.

+
 all 5,749 people in the LFW Dataset sorted from most to least images collected.
all 5,749 people in the LFW Dataset sorted from most to least images collected.

LFW by the Numbers

+
    +
  • Was first published in 2007
  • +
  • Developed out of a prior dataset from Berkeley called "Faces in the Wild" or "Names and Faces" [^lfw_original_paper]
  • +
  • Includes 13,233 images and 5,749 different people [^lfw_website]
  • +
  • There are about 3 men for every 1 woman (4,277 men and 1,472 women)[^lfw_website]
  • +
  • The person with the most images is George W. Bush with 530
  • +
  • Most people (70%) in the dataset have only 1 image
  • +
  • There are 1,680 people in the dataset with 2 or more images [^lfw_website]
  • +
  • Two of the four original authors received funding from the Office of the Director of National Intelligence and IARPA for their 2016 LFW survey follow-up report
  • +
  • The LFW dataset includes over 500 actors, 30 models, 10 presidents, 24 football players, 124 basketball players, 11 kings, and 2 queens
  • +
  • In all the LFW publications provided by the authors the words "ethics", "consent", and "privacy" appear 0 times [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] , [^lfw_website]
  • +
  • The word "future" appears 71 times
  • +
+

Facts

+
    +
  • Was created for the purpose of improving "unconstrained face recognition" [^lfw_original_paper]
  • +
  • All images in LFW were obtained "in the wild" meaning without any consent from the subject or from the photographer
  • +
  • The faces were detected using the Viola-Jones haarcascade face detector (a minimal detection sketch follows this list) [^lfw_website] [^lfw_survey]
  • +
  • Is considered the "most popular benchmark for face recognition" [^lfw_baidu]
  • +
  • Is "the most widely used evaluation set in the field of facial recognition" [^lfw_pingan]
  • +
  • Is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan]
  • +
+
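The Viola-Jones haarcascade detector mentioned in the list above ships with OpenCV. The LFW papers do not specify the exact cascade or parameters here, so the following is only a minimal sketch of how faces can be detected and cropped with OpenCV's bundled frontal-face cascade; the input filename and parameter values are assumptions.

    import cv2

    # Viola-Jones style Haar cascade bundled with OpenCV (assumed stand-in for the LFW pipeline)
    cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
    detector = cv2.CascadeClassifier(cascade_path)

    img = cv2.imread("news_photo.jpg")  # hypothetical input image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # detectMultiScale returns (x, y, w, h) rectangles for each detected face
    faces = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for i, (x, y, w, h) in enumerate(faces):
        cv2.imwrite("face_{}.jpg".format(i), img[y:y+h, x:x+w])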

need citations

+
    +
  • All images were copied from Yahoo News between 2002 - 2004 [^lfw_original_paper]
  • +
  • SenseTime, which has relied on LFW for benchmarking its facial recognition performance, is the leading provider of surveillance to the Chinese Government (need citation)
  • +
+
 former President George W. Bush
former President George W. Bush
+
Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)
Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)

People and Companies using the LFW Dataset

+

This section describes who is using the dataset and for what purposes. It should include specific examples of people or companies with citations and screenshots. This section is followed up by the graph, the map, and then the supplementary material.

The LFW dataset is used by numerous companies for benchmarking algorithms and in some cases training. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.

According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."

+

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.
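The LFW images are exposed directly through scikit-learn's dataset loaders, so anyone with the library installed can pull the faces down in a few lines. A minimal sketch (the data is downloaded and cached on first call; the min_faces_per_person and resize values here are arbitrary):

    from sklearn.datasets import fetch_lfw_people

    # Downloads and caches the LFW face crops on first use
    lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)

    print(lfw.images.shape)       # (n_samples, height, width) grayscale face crops
    print(len(lfw.target_names))  # number of distinct identities returned
    print(lfw.target_names[:5])   # identity labels, e.g. "George W Bush"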

load file: lfw_commercial_use.csv
 name_display,company_url,example_url,country,description
 
@@ -83,13 +110,18 @@ name_display,company_url,example_url,country,description

The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.

For all the 5,000 people in this dataset, their faces are forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.

Right to Removal

-

If you are affected by disclosure of your identity in this dataset please do contact the authors, many state that they are willing to remove images upon request. The authors of the LFW can be reached from the emails posted in their paper:

+

If you are affected by disclosure of your identity in this dataset please do contact the authors. Many have stated that they are willing to remove images upon request. The authors of the LFW dataset provide the following email for inquiries:

You can use the following message to request removal from the dataset:

+

To: Gary Huang mailto:gbhuang@cs.umass.edu

+

Subject: Request for Removal from LFW Face Dataset

Dear [researcher name],

-

I am writing to you about the "LFW Dataset". Recently I have discovered that your dataset includes my identity and no longer wish to be included in your dataset

-

MegaPixels is an educational art project developed for academic purposes. In no way does this project aim to villify the researchers who produced the datasets. The aim of this project is to encourage discourse around ethics and consent in artificial intelligence by providing information about these datasets that is otherwise difficult to obtain or inaccessible to other researchers.

+

I am writing to you about the "Labeled Faces in The Wild Dataset". Recently I discovered that your dataset includes my identity and I no longer wish to be included in your dataset.

+

The dataset is being used by thousands of companies around the world to improve facial recognition software, including usage by governments for the purpose of law enforcement, national security, tracking consumers in retail environments, and tracking individuals through public spaces.

+

My name as it appears in your dataset is [your name]. Please remove all images from your dataset and inform your newsletter subscribers to likewise update their copies.

+

- [your name]

+

Supplementary Data

-

Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.

+

Researchers, journ

@@ -221,6 +253,9 @@ imageio.imwrite('lfw_montage_full.png', montage) montage_960 = imutils.resize(montage, width=960) imageio.imwrite('lfw_montage_960.jpg', montage_960) +

Disclaimer

+

MegaPixels is an educational art project designed to encourage discourse about facial recognition datasets. Any ethical or legal issues should be directed to the researchers' parent organizations. Except where necessary for contact or clarity, the names of researchers have been substituted with their parent organization. In no way does this project aim to vilify researchers who produced the datasets.

+

Read more about MegaPixels Code of Conduct


    diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html new file mode 100644 index 00000000..24a1059b --- /dev/null +++ b/site/public/datasets/vgg_face2/index.html @@ -0,0 +1,84 @@ + + + + MegaPixels + + + + + + + + + +
    + + +
    MegaPixels
    + The Darkside of Datasets +
    + +
    +
    + +

    VGG Face2

    +
    Created
    2018
    Images
    3.3M
    People
    9,000
    Created From
    Scraping search engines
    Search available
    [Searchable](#)

    VGG Face2 is the updated version of the VGG Face dataset and now includes over 3.3M face images from over 9K people. The identities were selected by taking the top 500K identities in Google's Knowledge Graph of celebrities and then selecting only the names that yielded enough training images. The dataset was created in the UK but funded by the Office of the Director of National Intelligence in the United States.

    +

    {INSERT IMAGE SEARCH MODULE}

    +

    {INSERT TEXT SEARCH MODULE}

    +
    load file: lfw_names_gender_kg_min.csv
    +Name, Images, Gender, Description
    +
    +

    VGG Face2 by the Numbers

    +
      +
    • 1,331 actresses, 139 presidents
    • +
    • 3 husbands and 16 wives
    • +
    • 2 snooker players
    • +
    • 1 guru
    • +
    • 1 pornographic actress
    • +
    • 3 computer programmers
    • +
    +

    Names and descriptions

    +
      +
    • The original VGGF2 name list has been updated with the results returned from the Google Knowledge Graph
    • +
    • Names with a similarity score greater than 0.75 were automatically updated (see the sketch after this list). Scores computed using import difflib; seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower()); score = seq.ratio()
    • +
    • The 97 names with a score of 0.75 or lower were manually reviewed; this included name changes validated using Wikipedia.org results, such as "Bruce Jenner" to "Caitlyn Jenner", spousal last-name changes, discretionary changes to improve search results such as combining nicknames with the full name when appropriate (for example changing "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović"), and minor changes such as "Mohammad Ali" to "Muhammad Ali"
    • +
    • The `Description` text was automatically added when the Knowledge Graph score was greater than 250
    • +
    +
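A sketch of the name-similarity check quoted in the list above, using the difflib call given there. The 0.75 threshold comes from the text; the example names are illustrative only.

    import difflib

    def name_similarity(a, b):
        # Ratio in [0, 1]; higher means the two name strings are closer
        seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower())
        return seq.ratio()

    score = name_similarity("Bruce Jenner", "Caitlyn Jenner")
    if score > 0.75:
        print("auto-update name (score {:.2f})".format(score))
    else:
        print("flag for manual review (score {:.2f})".format(score))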

    TODO

    +
      +
    • create name list, and populate with Knowledge graph information like LFW
    • +
    • make list of interesting number stats, by the numbers
    • +
    • make list of interesting important facts
    • +
    • write intro abstract
    • +
    • write analysis of usage
    • +
    • find examples, citations, and screenshots of usage
    • +
    • find list of companies using it for table
    • +
    • create montages of the dataset, like LFW
    • +
    • create right to removal information
    • +
    +
    + +
    + + + + + \ No newline at end of file diff --git a/site/public/index.html b/site/public/index.html index 51006b59..91ff467a 100644 --- a/site/public/index.html +++ b/site/public/index.html @@ -71,6 +71,7 @@ + diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html new file mode 100644 index 00000000..8f598f5b --- /dev/null +++ b/site/public/research/00_introduction/index.html @@ -0,0 +1,86 @@ + + + + MegaPixels + + + + + + + + + +
    + + +
    MegaPixels
    + The Darkside of Datasets +
    + +
    +
    + +
    +

    Untitled Page

    +
    +
    +
    Posted
    +
    2018-12-31
    +
    +
    +
    By
    +
    Adam Harvey
    +
    + +
    +
    + +

    It was the early 2000s. Face recognition was new and no one seemed sure exactly how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure borders. This was the future John Ashcroft demanded with the Total Information Awareness Act of 2003 and that spooks had dreamed of for decades. It was a future that academics at Carnegie Mellon University and Colorado State University would help build. It was also a future that celebrities would play a significant role in building. And to the surprise of ordinary Internet users like myself and perhaps you, it was a future that millions of Internet users would unwittingly play a role in creating.

    +

    Now the future has arrived and it doesn't make sense. Facial recognition works yet it doesn't actually work. Facial recognition is cheap and accessible but also expensive and out of control. Facial recognition research has achieved headline-grabbing superhuman accuracies over 99.9%, yet facial recognition is also dangerously inaccurate. During a trial installation at Sudkreuz station in Berlin in 2018, 20% of the matches were wrong, an error rate so high that the system should not have any connection to law enforcement or justice. And in London, the Metropolitan Police had been using facial recognition software that mistakenly identified an alarming 98% of people as criminals 1, which perhaps is a crime itself.

    +

    MegaPixels is an online art project that explores the history of facial recognition from the perspective of datasets. To paraphrase the artist Trevor Paglen, whoever controls the dataset controls the meaning. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. MegaPixels does not start with a conclusion, a moralistic slant, or a

    +

    Whether or not to build facial recognition is a question that can no longer be asked. As an outspoken critic of face recognition I've developed, and hopefully furthered, my understanding during the last 10 years I've spent working with computer vision. Though I initially disagreed, I've come to see the technocratic perspective as a non-negotiable reality. As Oren (nytimes article) wrote in a NYT Op-Ed, "the horse is out of the barn", and the only thing we can do, collectively or individually, is to steer towards the least-worst outcome. Computational communication has entered a new era and it's both exciting and frightening to explore the potentials and opportunities. In 1997 getting access to 1 teraFLOPS of computational power would have cost you $55 million and required a strategic partnership with the Department of Defense. At the time of writing, anyone can rent 1 teraFLOPS on a cloud GPU marketplace for less than $1/day. 2.

    +

    I hope that this project will illuminate the darker areas of the strange world of facial recognition that have not yet received attention and encourage discourse in academic, industry, and . By no means do I believe discourse can save the day. Nor do I think creating artwork can. In fact, I'm not exactly sure what the outcome of this project will be. The project is not so much what I publish here but what happens after. This entire project is only a prologue.

    +

    As McLuhan wrote, "You can't have a static, fixed position in the electric age". And in our hyper-connected age of mass surveillance, artificial intelligence, and unevenly distributed virtual futures, the most irrational thing to be is rational. Increasingly the world is becoming a contradiction where people use surveillance to protest surveillance, use

    +

    Like many projects, MegaPixels had spent years meandering between formats and unfeasible budgets, and was generally too niche a subject. The basic idea for this project, as proposed to the original Glass Room installation in 2016 in NYC, was to build an interactive mirror that showed people if they had been included in the LFW facial recognition dataset. The idea was based on my reaction to all the datasets I'd come across during research for the CV Dazzle project. I'd noticed strange datasets created for training and testing face detection algorithms. Most were created in laboratory settings and their interpretation of face data was very strict.

    +

    About the name

    +

    About the funding

    +

    About me

    +

    About the team

    +

    Conclusion

    +

    for other post

    +

    It was the early 2000s. Face recognition was new and no one seemed sure how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure the borders. It was the future that John Ashcroft demanded with the Total Information Awareness Act of 2003. It was a future that academics helped build. It was a future that celebrities helped build. And it was a future that

    +

    A decade earlier the Department of Defense and the Counterdrug Technology Development Program Office initiated a feasibility study called FERET (FacE REcognition Technology) to "develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties [^feret_website]."

    +

    One problem with the FERET dataset was that the photos were taken in controlled settings. For face recognition to work it would have to be used in uncontrolled settings. Even newer datasets such as Multi-PIE (Pose, Illumination, and Expression) from Carnegie Mellon University included only indoor photos of cooperative subjects. Not only were the photos completely unrealistic, CMU's Multi-PIE included only 18 individuals, cost $500 for academic use [^cmu_multipie_cost], took years to create, and required consent from every participant.

    +
    +
    +
    1. Sharman, Jon. "Metropolitan Police's facial recognition technology 98% inaccurate, figures show". 2018. https://www.independent.co.uk/news/uk/home-news/met-police-facial-recognition-success-south-wales-trial-home-office-false-positive-a8345036.html

    2. +
    3. Calle, Dan. "Supercomputers". 1997. http://ei.cs.vt.edu/~history/SUPERCOM.Calle.HTML

    4. +
    +
    +
    + +
    + + + + + \ No newline at end of file diff --git a/site/public/research/index.html b/site/public/research/index.html index cf9546e1..59a5fee9 100644 --- a/site/public/research/index.html +++ b/site/public/research/index.html @@ -28,7 +28,7 @@

    Research Blog

    The darkside of datasets and the future of computer vision

    -
    +
    -- cgit v1.2.3-70-g09d2 From a53a598461a25e8bf1d0bd3e63c47642e3213aef Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sat, 15 Dec 2018 22:47:28 +0100 Subject: client stuff --- client/actions.js | 8 +++--- client/index.js | 60 ++++++++++++++++++++++++++++++++++++-------- client/session.js | 6 ++--- client/store.js | 26 ++++++++----------- client/util.js | 2 ++ megapixels/app/server/api.py | 26 ++++++++++++++++++- megapixels/cli_flask.py | 1 + site/templates/research.html | 4 +++ 8 files changed, 98 insertions(+), 35 deletions(-) (limited to 'megapixels/app/server/api.py') diff --git a/client/actions.js b/client/actions.js index ba899f06..3fd86e0d 100644 --- a/client/actions.js +++ b/client/actions.js @@ -1,9 +1,9 @@ import * as search from './search/search.actions' -import * as review from './review/review.actions' -import * as metadata from './metadata/metadata.actions' +// import * as review from './review/review.actions' +// import * as metadata from './metadata/metadata.actions' export { search, - review, - metadata, + // review, + // metadata, } diff --git a/client/index.js b/client/index.js index eddc5fb2..03015988 100644 --- a/client/index.js +++ b/client/index.js @@ -3,17 +3,55 @@ import ReactDOM from 'react-dom' import { AppContainer } from 'react-hot-loader' import { Provider } from 'react-redux' -import App from './app' +// import App from './app' -import { store, history } from './store' +import { store } from './store' -const container = document.createElement('div') -document.body.appendChild(container) +// const container = document.createElement('div') +// document.body.appendChild(container) -ReactDOM.render( - - - - - , container -) +toArray(document.querySelectorAll('.applet')).forEach(el => { + try { + const payload = JSON.parse(el.dataSet.getItem('payload')) + } catch(e) { + return + } + switch (payload.command) { + case 'load file': + append_tabulator(el, payload) + break + default: + append_react_applet(el, payload) + break + } +}) + +function append_react_applet(el, payload) { + ReactDOM.render( + + + + + , el + ) +} +function append_tabulator(el, payload) { + const table = new Tabulator(el, { + height:"311px", + layout:"fitColumns", + placeholder:"No Data Set", + columns:[ + // {title:"Name", field:"name", sorter:"string", width:200}, + // {title:"Progress", field:"progress", sorter:"number", formatter:"progress"}, + // {title:"Gender", field:"gender", sorter:"string"}, + // {title:"Rating", field:"rating", formatter:"star", align:"center", width:100}, + // {title:"Favourite Color", field:"col", sorter:"string", sortable:false}, + // {title:"Date Of Birth", field:"dob", sorter:"date", align:"center"}, + // {title:"Driver", field:"car", align:"center", formatter:"tickCross", sorter:"boolean"}, + ], + }) + const columns = payload.fields.split(', ') + console.log(columns) + if () + table.setData(path) +} \ No newline at end of file diff --git a/client/session.js b/client/session.js index 5bfae7eb..0fae31d2 100644 --- a/client/session.js +++ b/client/session.js @@ -1,5 +1,5 @@ -import Storage from 'store2' +// import Storage from 'store2' -const session = Storage.namespace('vcat.search') +// const session = Storage.namespace('vcat.search') -export default session +// export default session diff --git a/client/store.js b/client/store.js index 043af351..9c0f78cd 100644 --- a/client/store.js +++ b/client/store.js @@ -1,30 +1,25 @@ import { applyMiddleware, compose, combineReducers, createStore } from 'redux' -import { connectRouter, routerMiddleware } from 
'connected-react-router' -import { createBrowserHistory } from 'history' import thunk from 'redux-thunk' -import { login } from './util' -import metadataReducer from './metadata/metadata.reducer' -import searchReducer from './search/search.reducer' -import reviewReducer from './review/review.reducer' +// import metadataReducer from './metadata/metadata.reducer' +// import searchReducer from './search/search.reducer' +// import reviewReducer from './review/review.reducer' const rootReducer = combineReducers({ - auth: (state = login()) => state, - metadata: metadataReducer, - search: searchReducer, - review: reviewReducer, + // auth: (state = login()) => state, + // metadata: metadataReducer, + // search: searchReducer, + // review: reviewReducer, }) -function configureStore(initialState = {}, history) { +function configureStore(initialState = {}) { const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose const store = createStore( - connectRouter(history)(rootReducer), // new root reducer with router state initialState, composeEnhancers( applyMiddleware( thunk, - routerMiddleware(history) ), ), ) @@ -32,7 +27,6 @@ function configureStore(initialState = {}, history) { return store } -const history = createBrowserHistory() -const store = configureStore({}, history) +const store = configureStore({}) -export { store, history } +export { store } diff --git a/client/util.js b/client/util.js index ad303c64..92b4addc 100644 --- a/client/util.js +++ b/client/util.js @@ -6,6 +6,8 @@ export const isAndroid = !!(navigator.userAgent.match(/Android/i)) export const isMobile = isiPhone || isiPad || isAndroid export const isDesktop = !isMobile +export const toArray = a => Array.prototype.slice.apply(a) + const htmlClassList = document.body.parentNode.classList htmlClassList.add(isDesktop ? 'desktop' : 'mobile') diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index c5e27dd2..cd2b950b 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -23,8 +23,10 @@ def show(name): else: return jsonify({ 'status': 404 }) -@api.route('/dataset//face', methods=['POST']) +@api.route('/dataset//face', methods=['POST']) def upload(name): + start = time.time() + dataset = get_dataset(name) file = request.files['query_img'] fn = file.filename if fn.endswith('blob'): @@ -42,6 +44,28 @@ def upload(name): # print(vec.shape) # results = db.search(vec, limit=limit) + # with the result we have an ID + # query the sql dataset for the UUID etc here + + query = { + 'timing': time.time() - start, + } + results = [] + + print(results) + return jsonify({ + 'query': query, + 'results': results, + }) + +@api.route('/dataset//name', methods=['GET']) +def name_lookup(dataset): + start = time.time() + dataset = get_dataset(name) + + # we have a query from the request query string... 
+ # use this to do a like* query on the identities_meta table + query = { 'timing': time.time() - start, } diff --git a/megapixels/cli_flask.py b/megapixels/cli_flask.py index 369bec01..e80526c6 100644 --- a/megapixels/cli_flask.py +++ b/megapixels/cli_flask.py @@ -1,5 +1,6 @@ # -------------------------------------------------------- # wrapper for flask CLI API +# NB: python cli_flask.py run # -------------------------------------------------------- import click diff --git a/site/templates/research.html b/site/templates/research.html index 0bb9fa30..b2ea3a6b 100644 --- a/site/templates/research.html +++ b/site/templates/research.html @@ -21,3 +21,7 @@ {{ content }} {% endblock %} + +{% block scripts %} + +{% endblock %} \ No newline at end of file -- cgit v1.2.3-70-g09d2 From 3b10acc73247ec703ed47f0423e7d255a91f074e Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 16 Dec 2018 00:11:20 +0100 Subject: tabulator --- client/index.js | 29 +- megapixels/app/server/api.py | 17 +- megapixels/app/server/create.py | 7 + megapixels/app/site/builder.py | 10 + megapixels/app/site/parser.py | 36 +- megapixels/app/site/s3.py | 3 + site/assets/css/applets.css | 2 - site/assets/css/tabulator.css | 761 +++++++++++++++++++++ site/public/about/credits/index.html | 2 +- site/public/about/disclaimer/index.html | 2 +- site/public/about/index.html | 2 +- site/public/about/press/index.html | 2 +- site/public/about/privacy/index.html | 2 +- site/public/about/style/index.html | 2 +- site/public/about/terms/index.html | 2 +- site/public/datasets/lfw/index.html | 6 +- site/public/datasets/vgg_face2/index.html | 4 +- site/public/index.html | 2 +- site/public/research/00_introduction/index.html | 2 +- .../research/01_from_1_to_100_pixels/index.html | 2 +- site/public/research/index.html | 2 +- site/templates/layout.html | 1 + 22 files changed, 862 insertions(+), 36 deletions(-) create mode 100755 site/assets/css/tabulator.css (limited to 'megapixels/app/server/api.py') diff --git a/client/index.js b/client/index.js index bc57f548..2ee12c14 100644 --- a/client/index.js +++ b/client/index.js @@ -23,23 +23,24 @@ function appendTabulator(el, payload) { height: '311px', layout: 'fitColumns', placeholder: 'No Data Set', - columns: [ - // {title:'Name', field:'name', sorter:'string', width:200}, - // {title:'Progress', field:'progress', sorter:'number', formatter:'progress'}, - // {title:'Gender', field:'gender', sorter:"string"}, - // {title:"Rating", field:"rating", formatter:"star", align:"center", width:100}, - // {title:"Favourite Color", field:"col", sorter:"string", sortable:false}, - // {title:"Date Of Birth", field:"dob", sorter:"date", align:"center"}, - // {title:"Driver", field:"car", align:"center", formatter:"tickCross", sorter:"boolean"}, - ], + columns: payload.fields.split(', ').map(field => { + switch (field) { + default: + return { title: field, field: field.toLowerCase(), sorter: 'string' } + } + }), + // {title:'Name', field:'name', sorter:'string', width:200}, + // {title:'Progress', field:'progress', sorter:'number', formatter:'progress'}, + // {title:'Gender', field:'gender', sorter:"string"}, + // {title:"Rating", field:"rating", formatter:"star", align:"center", width:100}, + // {title:"Favourite Color", field:"col", sorter:"string", sortable:false}, + // {title:"Date Of Birth", field:"dob", sorter:"date", align:"center"}, + // {title:"Driver", field:"car", align:"center", formatter:"tickCross", sorter:"boolean"}, }) let path = payload.opt let columns = payload.fields.split(',').map(s => 
s.trim()) - console.log(columns) - if (path[0] !== '/') { - console.log(path) - } - // table.setData(path) + console.log(path, columns) + table.setData(path) } function appendApplets() { diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index cd2b950b..cf8241bd 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -1,9 +1,12 @@ import os import re import time +import dlib from flask import Blueprint, request, jsonify from PIL import Image # todo: try to remove PIL dependency +from app.processors import face_recognition +from app.processors import face_detector from app.models.sql_factory import list_datasets, get_dataset, get_table sanitize_re = re.compile('[\W]+') @@ -39,8 +42,18 @@ def upload(name): img = Image.open(file.stream).convert('RGB') - # vec = db.load_feature_vector_from_file(uploaded_img_path) - # vec = fe.extract(img) + # Face detection + detector = face_detector.DetectorDLIBHOG() + + # get detection as BBox object + bboxes = detector.detect(im, largest=True) + bbox = bboxes[0] + dim = im.shape[:2][::-1] + bbox = bbox.to_dim(dim) # convert back to real dimensions + + # face recognition/vector + recognition = face_recognition.RecognitionDLIB(gpu=-1) + # print(vec.shape) # results = db.search(vec, limit=limit) diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py index c1f41dc4..4b1333b9 100644 --- a/megapixels/app/server/create.py +++ b/megapixels/app/server/create.py @@ -7,6 +7,9 @@ from app.server.api import api db = SQLAlchemy() def create_app(script_info=None): + """ + functional pattern for creating the flask app + """ app = Flask(__name__, static_folder='static', static_url_path='') app.config['SQLALCHEMY_DATABASE_URI'] = connection_url app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False @@ -37,6 +40,10 @@ def create_app(script_info=None): return app def serve_page(file_relative_path_to_root): + """ + trying to get this to serve /path/ with /path/index.html, + ...but it doesnt actually matter for production... + """ if file_relative_path_to_root[-1] == '/': file_relative_path_to_root += 'index.html' return send_from_directory("static", file_relative_path_to_root) diff --git a/megapixels/app/site/builder.py b/megapixels/app/site/builder.py index 91df54c2..895f265b 100644 --- a/megapixels/app/site/builder.py +++ b/megapixels/app/site/builder.py @@ -15,6 +15,10 @@ env = Environment( ) def build_page(fn, research_posts): + """ + build a single page from markdown into the appropriate template + - writes it to site/public/ + """ metadata, sections = parser.read_metadata(fn) if metadata is None: @@ -61,6 +65,9 @@ def build_page(fn, research_posts): file.write(html) def build_research_index(research_posts): + """ + build the index of research (blog) posts + """ metadata, sections = parser.read_metadata('../site/content/research/index.md') template = env.get_template("page.html") s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, metadata['path']) @@ -77,6 +84,9 @@ def build_research_index(research_posts): file.write(html) def build_site(): + """ + build the site! 
=^) + """ research_posts = parser.read_research_post_index() for fn in glob.iglob(os.path.join(cfg.DIR_SITE_CONTENT, "**/*.md"), recursive=True): build_page(fn, research_posts) diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index d78cc402..40d9c7f6 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -11,6 +11,10 @@ renderer = mistune.Renderer(escape=False) markdown = mistune.Markdown(renderer=renderer) def fix_images(lines, s3_path): + """ + do our own tranformation of the markdown around images to handle wide images etc + lines: markdown lines + """ real_lines = [] block = "\n\n".join(lines) for line in block.split("\n"): @@ -29,6 +33,9 @@ def fix_images(lines, s3_path): return "\n".join(real_lines) def format_section(lines, s3_path, type=''): + """ + format a normal markdown section + """ if len(lines): lines = fix_images(lines, s3_path) if type: @@ -38,13 +45,16 @@ def format_section(lines, s3_path, type=''): return "" def format_metadata(section): + """ + format a metadata section (+ key: value pairs) + """ meta = [] for line in section.split('\n'): key, value = line[2:].split(': ', 1) meta.append("
    {}
    {}
    ".format(key, value)) return "
    {}
    ".format(''.join(meta)) -def format_applet(section): +def format_applet(section, s3_path): payload = section.replace('```', '').strip().split('\n') applet = {} if ': ' in payload[0]: @@ -56,10 +66,15 @@ def format_applet(section): if opt: applet['opt'] = opt if command == 'load file': + if opt[0] != '/': + applet['opt'] = s3_path + opt applet['fields'] = payload[1] return "
    ".format(json.dumps(applet)) def parse_markdown(sections, s3_path, skip_h1=False): + """ + parse page into sections, preprocess the markdown to handle our modifications + """ groups = [] current_group = [] for section in sections: @@ -67,7 +82,7 @@ def parse_markdown(sections, s3_path, skip_h1=False): continue elif section.startswith('```'): groups.append(format_section(current_group, s3_path)) - groups.append(format_applet(section)) + groups.append(format_applet(section, s3_path)) current_group = [] elif section.startswith('+ '): groups.append(format_section(current_group, s3_path)) @@ -88,6 +103,9 @@ def parse_markdown(sections, s3_path, skip_h1=False): return content def parse_research_index(research_posts): + """ + Generate an index file for the research pages + """ content = "
    " for post in research_posts: s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, post['path']) @@ -105,6 +123,9 @@ def parse_research_index(research_posts): return content def read_metadata(fn): + """ + Read in read a markdown file and extract the metadata + """ with open(fn, "r") as file: data = file.read() data = data.replace("\n ", "\n") @@ -128,6 +149,9 @@ default_metadata = { } def parse_metadata_section(metadata, section): + """ + parse a metadata key: value pair + """ for line in section.split("\n"): if ': ' not in line: continue @@ -135,6 +159,11 @@ def parse_metadata_section(metadata, section): metadata[key.lower()] = value def parse_metadata(fn, sections): + """ + parse the metadata headers in a markdown file + (everything before the second ---------) + also generates appropriate urls for this page :) + """ found_meta = False metadata = {} valid_sections = [] @@ -175,6 +204,9 @@ def parse_metadata(fn, sections): return metadata, valid_sections def read_research_post_index(): + """ + Generate an index of the research (blog) posts + """ posts = [] for fn in sorted(glob.glob('../site/content/research/*/index.md')): metadata, valid_sections = read_metadata(fn) diff --git a/megapixels/app/site/s3.py b/megapixels/app/site/s3.py index 99726a4d..5464d464 100644 --- a/megapixels/app/site/s3.py +++ b/megapixels/app/site/s3.py @@ -3,6 +3,9 @@ import glob import boto3 def sync_directory(base_fn, s3_path, metadata): + """ + Synchronize a local assets folder with S3 + """ fns = {} for fn in glob.glob(os.path.join(base_fn, 'assets/*')): fns[os.path.basename(fn)] = True diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index b437886b..e106b3dd 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -1,4 +1,2 @@ .applet { - border: 1px solid #0f0; - color: #0f0; } diff --git a/site/assets/css/tabulator.css b/site/assets/css/tabulator.css new file mode 100755 index 00000000..8c123cf4 --- /dev/null +++ b/site/assets/css/tabulator.css @@ -0,0 +1,761 @@ +/* Tabulator v4.1.3 (c) Oliver Folkerd */ +.tabulator { + position: relative; + font-size: 14px; + text-align: left; + overflow: hidden; + -ms-transform: translatez(0); + transform: translatez(0); +} + +.tabulator[tabulator-layout="fitDataFill"] .tabulator-tableHolder .tabulator-table { + min-width: 100%; +} + +.tabulator.tabulator-block-select { + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +.tabulator .tabulator-header { + position: relative; + box-sizing: border-box; + width: 100%; + border-bottom: 1px solid #999; + color: #ddd; + white-space: nowrap; + overflow: hidden; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none; +} + +.tabulator .tabulator-header .tabulator-col { + display: inline-block; + position: relative; + box-sizing: border-box; + border-right: 1px solid #aaa; + text-align: left; + vertical-align: bottom; + overflow: hidden; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-moving { + position: absolute; + border: 1px solid #999; + background: rgba(80,20,10,0.2); + pointer-events: none; +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content { + box-sizing: border-box; + position: relative; + padding: 4px; +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-title { + box-sizing: border-box; + width: 100%; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + vertical-align: bottom; +} + +.tabulator 
.tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-title .tabulator-title-editor { + box-sizing: border-box; + width: 100%; + border: 1px solid #999; + padding: 1px; + background: #fff; +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-arrow { + display: inline-block; + position: absolute; + top: 9px; + right: 8px; + width: 0; + height: 0; + border-left: 6px solid transparent; + border-right: 6px solid transparent; + border-bottom: 6px solid #bbb; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-group .tabulator-col-group-cols { + position: relative; + display: -ms-flexbox; + display: flex; + border-top: 1px solid #aaa; + overflow: hidden; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-group .tabulator-col-group-cols .tabulator-col:last-child { + margin-right: -1px; +} + +.tabulator .tabulator-header .tabulator-col:first-child .tabulator-col-resize-handle.prev { + display: none; +} + +.tabulator .tabulator-header .tabulator-col.ui-sortable-helper { + position: absolute; + border: 1px solid #aaa; +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter { + position: relative; + box-sizing: border-box; + margin-top: 2px; + width: 100%; + text-align: center; +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter textarea { + height: auto !important; +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter svg { + margin-top: 3px; +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter input::-ms-clear { + width: 0; + height: 0; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable .tabulator-col-title { + padding-right: 25px; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable:hover { + cursor: pointer; + background-color: #cdcdcd; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort="none"] .tabulator-col-content .tabulator-arrow { + border-top: none; + border-bottom: 6px solid #bbb; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort="asc"] .tabulator-col-content .tabulator-arrow { + border-top: none; + border-bottom: 6px solid #666; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort="desc"] .tabulator-col-content .tabulator-arrow { + border-top: 6px solid #666; + border-bottom: none; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical .tabulator-col-content .tabulator-col-title { + -webkit-writing-mode: vertical-rl; + -ms-writing-mode: tb-rl; + writing-mode: vertical-rl; + text-orientation: mixed; + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: center; + justify-content: center; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-col-vertical-flip .tabulator-col-title { + -ms-transform: rotate(180deg); + transform: rotate(180deg); +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-sortable .tabulator-col-title { + padding-right: 0; + padding-top: 20px; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-sortable.tabulator-col-vertical-flip .tabulator-col-title { + padding-right: 0; + padding-bottom: 20px; +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-sortable .tabulator-arrow { + right: calc(50% - 6px); +} + +.tabulator .tabulator-header .tabulator-frozen { + display: inline-block; + position: absolute; + z-index: 
10; +} + +.tabulator .tabulator-header .tabulator-frozen.tabulator-frozen-left { + border-right: 2px solid #aaa; +} + +.tabulator .tabulator-header .tabulator-frozen.tabulator-frozen-right { + border-left: 2px solid #aaa; +} + +.tabulator .tabulator-header .tabulator-calcs-holder { + box-sizing: border-box; + min-width: 400%; + background: rgba(80,20,10,0.2); + border-top: 1px solid #aaa; + border-bottom: 1px solid #aaa; + overflow: hidden; +} + +.tabulator .tabulator-header .tabulator-calcs-holder .tabulator-row { + background: rgba(80,20,10,0.2); +} + +.tabulator .tabulator-header .tabulator-calcs-holder .tabulator-row .tabulator-col-resize-handle { + display: none; +} + +.tabulator .tabulator-header .tabulator-frozen-rows-holder { + min-width: 400%; +} + +.tabulator .tabulator-header .tabulator-frozen-rows-holder:empty { + display: none; +} + +.tabulator .tabulator-tableHolder { + position: relative; + width: 100%; + white-space: nowrap; + overflow: auto; + -webkit-overflow-scrolling: touch; +} + +.tabulator .tabulator-tableHolder:focus { + outline: none; +} + +.tabulator .tabulator-tableHolder .tabulator-placeholder { + box-sizing: border-box; + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + width: 100%; +} + +.tabulator .tabulator-tableHolder .tabulator-placeholder[tabulator-render-mode="virtual"] { + position: absolute; + top: 0; + left: 0; + height: 100%; +} + +.tabulator .tabulator-tableHolder .tabulator-placeholder span { + display: inline-block; + margin: 0 auto; + padding: 10px; + color: #ccc; + font-weight: bold; + font-size: 20px; +} + +.tabulator .tabulator-tableHolder .tabulator-table { + position: relative; + display: inline-block; + background-color: #fff; + white-space: nowrap; + overflow: visible; + color: #333; +} + +.tabulator .tabulator-tableHolder .tabulator-table .tabulator-row.tabulator-calcs { + font-weight: bold; + background: rgba(80,20,10,0.2); +} + +.tabulator .tabulator-tableHolder .tabulator-table .tabulator-row.tabulator-calcs.tabulator-calcs-top { + border-bottom: 2px solid #aaa; +} + +.tabulator .tabulator-tableHolder .tabulator-table .tabulator-row.tabulator-calcs.tabulator-calcs-bottom { + border-top: 2px solid #aaa; +} + +.tabulator .tabulator-footer { + padding: 5px 10px; + border-top: 1px solid #999; + text-align: right; + color: #555; + font-weight: bold; + white-space: nowrap; + -ms-user-select: none; + user-select: none; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none; +} + +.tabulator .tabulator-footer .tabulator-calcs-holder { + box-sizing: border-box; + width: calc(100% + 20px); + margin: -5px -10px 5px -10px; + text-align: left; + background: rgba(80,20,10,0.2); + border-bottom: 1px solid #aaa; + border-top: 1px solid #aaa; + overflow: hidden; +} + +.tabulator .tabulator-footer .tabulator-calcs-holder .tabulator-row { + background: rgba(80,20,10,0.2); +} + +.tabulator .tabulator-footer .tabulator-calcs-holder .tabulator-row .tabulator-col-resize-handle { + display: none; +} + +.tabulator .tabulator-footer .tabulator-calcs-holder:only-child { + margin-bottom: -5px; + border-bottom: none; +} + +.tabulator .tabulator-footer .tabulator-pages { + margin: 0 7px; +} + +.tabulator .tabulator-footer .tabulator-page { + display: inline-block; + margin: 0 2px; + padding: 2px 5px; + border: 1px solid #aaa; + border-radius: 3px; + background: rgba(255, 255, 255, 0.2); + color: #555; + font-family: inherit; + font-weight: inherit; + font-size: inherit; +} + 
+.tabulator .tabulator-footer .tabulator-page.active { + color: #d00; +} + +.tabulator .tabulator-footer .tabulator-page:disabled { + opacity: .5; +} + +.tabulator .tabulator-footer .tabulator-page:not(.disabled):hover { + cursor: pointer; + background: rgba(0, 0, 0, 0.2); + color: #fff; +} + +.tabulator .tabulator-col-resize-handle { + position: absolute; + right: 0; + top: 0; + bottom: 0; + width: 5px; +} + +.tabulator .tabulator-col-resize-handle.prev { + left: 0; + right: auto; +} + +.tabulator .tabulator-col-resize-handle:hover { + cursor: ew-resize; +} + +.tabulator .tabulator-loader { + position: absolute; + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + top: 0; + left: 0; + z-index: 100; + height: 100%; + width: 100%; + background: rgba(0, 0, 0, 0.4); + text-align: center; +} + +.tabulator .tabulator-loader .tabulator-loader-msg { + display: inline-block; + margin: 0 auto; + padding: 10px 20px; + border-radius: 4px; + background: #fff; + font-weight: bold; + font-size: 16px; +} + +.tabulator .tabulator-loader .tabulator-loader-msg.tabulator-loading { + border: 4px solid #333; + color: #000; +} + +.tabulator .tabulator-loader .tabulator-loader-msg.tabulator-error { + color: #000; +} + +.tabulator-row { + position: relative; + box-sizing: border-box; + min-height: 22px; + background-color: #fff; +} + +.tabulator-row.tabulator-row-even { + background-color: #EFEFEF; +} + +.tabulator-row.tabulator-selectable:hover { + background-color: #bbb; + cursor: pointer; +} + +.tabulator-row.tabulator-selected { + background-color: #9ABCEA; +} + +.tabulator-row.tabulator-selected:hover { + background-color: #769BCC; + cursor: pointer; +} + +.tabulator-row.tabulator-row-moving { + border: 1px solid #000; + background: #fff; +} + +.tabulator-row.tabulator-moving { + position: absolute; + border-top: 1px solid #aaa; + border-bottom: 1px solid #aaa; + pointer-events: none; + z-index: 15; +} + +.tabulator-row .tabulator-row-resize-handle { + position: absolute; + right: 0; + bottom: 0; + left: 0; + height: 5px; +} + +.tabulator-row .tabulator-row-resize-handle.prev { + top: 0; + bottom: auto; +} + +.tabulator-row .tabulator-row-resize-handle:hover { + cursor: ns-resize; +} + +.tabulator-row .tabulator-frozen { + display: inline-block; + position: absolute; + background-color: inherit; + z-index: 10; +} + +.tabulator-row .tabulator-frozen.tabulator-frozen-left { + border-right: 2px solid #aaa; +} + +.tabulator-row .tabulator-frozen.tabulator-frozen-right { + border-left: 2px solid #aaa; +} + +.tabulator-row .tabulator-responsive-collapse { + box-sizing: border-box; + padding: 5px; + border-top: 1px solid #aaa; + border-bottom: 1px solid #aaa; +} + +.tabulator-row .tabulator-responsive-collapse:empty { + display: none; +} + +.tabulator-row .tabulator-responsive-collapse table { + font-size: 14px; +} + +.tabulator-row .tabulator-responsive-collapse table tr td { + position: relative; +} + +.tabulator-row .tabulator-responsive-collapse table tr td:first-of-type { + padding-right: 10px; +} + +.tabulator-row .tabulator-cell { + display: inline-block; + position: relative; + box-sizing: border-box; + padding: 4px; + border-right: 1px solid #aaa; + vertical-align: middle; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.tabulator-row .tabulator-cell.tabulator-editing { + border: 1px solid #1D68CD; + padding: 0; +} + +.tabulator-row .tabulator-cell.tabulator-editing input, .tabulator-row .tabulator-cell.tabulator-editing select { + border: 
1px; + background: transparent; +} + +.tabulator-row .tabulator-cell.tabulator-validation-fail { + border: 1px solid #dd0000; +} + +.tabulator-row .tabulator-cell.tabulator-validation-fail input, .tabulator-row .tabulator-cell.tabulator-validation-fail select { + border: 1px; + background: transparent; + color: #dd0000; +} + +.tabulator-row .tabulator-cell:first-child .tabulator-col-resize-handle.prev { + display: none; +} + +.tabulator-row .tabulator-cell.tabulator-row-handle { + display: -ms-inline-flexbox; + display: inline-flex; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: center; + justify-content: center; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none; +} + +.tabulator-row .tabulator-cell.tabulator-row-handle .tabulator-row-handle-box { + width: 80%; +} + +.tabulator-row .tabulator-cell.tabulator-row-handle .tabulator-row-handle-box .tabulator-row-handle-bar { + width: 100%; + height: 3px; + margin-top: 2px; + background: #666; +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-branch { + display: inline-block; + vertical-align: middle; + height: 9px; + width: 7px; + margin-top: -9px; + margin-right: 5px; + border-bottom-left-radius: 1px; + border-left: 2px solid #aaa; + border-bottom: 2px solid #aaa; +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control { + display: -ms-inline-flexbox; + display: inline-flex; + -ms-flex-pack: center; + justify-content: center; + -ms-flex-align: center; + align-items: center; + vertical-align: middle; + height: 11px; + width: 11px; + margin-right: 5px; + border: 1px solid #333; + border-radius: 2px; + background: rgba(0, 0, 0, 0.1); + overflow: hidden; +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control:hover { + cursor: pointer; + background: rgba(0, 0, 0, 0.2); +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-collapse { + display: inline-block; + position: relative; + height: 7px; + width: 1px; + background: transparent; +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-collapse:after { + position: absolute; + content: ""; + left: -3px; + top: 3px; + height: 1px; + width: 7px; + background: #333; +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-expand { + display: inline-block; + position: relative; + height: 7px; + width: 1px; + background: #333; +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-expand:after { + position: absolute; + content: ""; + left: -3px; + top: 3px; + height: 1px; + width: 7px; + background: #333; +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle { + display: -ms-inline-flexbox; + display: inline-flex; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: center; + justify-content: center; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none; + height: 15px; + width: 15px; + border-radius: 4px; + background: #666; + color: #fff; + font-weight: bold; + font-size: 1.1em; +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle:hover { + opacity: .7; +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle.open .tabulator-responsive-collapse-toggle-close { + display: initial; +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle.open .tabulator-responsive-collapse-toggle-open { + display: none; 
+} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle .tabulator-responsive-collapse-toggle-close { + display: none; +} + +.tabulator-row.tabulator-group { + box-sizing: border-box; + border-bottom: 1px solid #999; + border-right: 1px solid #aaa; + border-top: 1px solid #999; + padding: 5px; + padding-left: 10px; + background: #ccc; + font-weight: bold; + min-width: 100%; +} + +.tabulator-row.tabulator-group:hover { + cursor: pointer; + background-color: rgba(0, 0, 0, 0.1); +} + +.tabulator-row.tabulator-group.tabulator-group-visible .tabulator-arrow { + margin-right: 10px; + border-left: 6px solid transparent; + border-right: 6px solid transparent; + border-top: 6px solid #666; + border-bottom: 0; +} + +.tabulator-row.tabulator-group.tabulator-group-level-1 .tabulator-arrow { + margin-left: 20px; +} + +.tabulator-row.tabulator-group.tabulator-group-level-2 .tabulator-arrow { + margin-left: 40px; +} + +.tabulator-row.tabulator-group.tabulator-group-level-3 .tabulator-arrow { + margin-left: 60px; +} + +.tabulator-row.tabulator-group.tabulator-group-level-4 .tabulator-arrow { + margin-left: 80px; +} + +.tabulator-row.tabulator-group.tabulator-group-level-5 .tabulator-arrow { + margin-left: 100px; +} + +.tabulator-row.tabulator-group .tabulator-arrow { + display: inline-block; + width: 0; + height: 0; + margin-right: 16px; + border-top: 6px solid transparent; + border-bottom: 6px solid transparent; + border-right: 0; + border-left: 6px solid #666; + vertical-align: middle; +} + +.tabulator-row.tabulator-group span { + margin-left: 10px; + color: #d00; +} + +.tabulator-edit-select-list { + position: absolute; + display: inline-block; + box-sizing: border-box; + max-height: 200px; + background: #fff; + border: 1px solid #aaa; + font-size: 14px; + overflow-y: auto; + -webkit-overflow-scrolling: touch; + z-index: 10000; +} + +.tabulator-edit-select-list .tabulator-edit-select-list-item { + padding: 4px; + color: #333; +} + +.tabulator-edit-select-list .tabulator-edit-select-list-item.active { + color: #fff; + background: #1D68CD; +} + +.tabulator-edit-select-list .tabulator-edit-select-list-item:hover { + cursor: pointer; + color: #fff; + background: #1D68CD; +} + +.tabulator-edit-select-list .tabulator-edit-select-list-group { + border-bottom: 1px solid #aaa; + padding: 4px; + padding-top: 6px; + color: #333; + font-weight: bold; +} diff --git a/site/public/about/credits/index.html b/site/public/about/credits/index.html index 1b1f4d54..67e9dcb8 100644 --- a/site/public/about/credits/index.html +++ b/site/public/about/credits/index.html @@ -8,6 +8,7 @@ + @@ -19,7 +20,6 @@ The Darkside of Datasets
    Title
diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html
index 63bb2cc0..46242505 100644
--- a/site/public/datasets/vgg_face2/index.html
+++ b/site/public/datasets/vgg_face2/index.html
@@ -8,6 +8,7 @@
+
@@ -19,7 +20,6 @@
    The Darkside of Datasets