| author | adamhrv <adam@ahprojects.com> | 2019-10-08 16:02:47 +0200 |
|---|---|---|
| committer | adamhrv <adam@ahprojects.com> | 2019-10-08 16:02:47 +0200 |
| commit | 27340ac4cd43f8eec7414495b541a65566ae2656 | |
| tree | cd43fcf1af026c75e6045d71d7d783ec460ba3ee | |
| parent | a4ea2852f4b46566a61f988342aa04e4059ccef9 | |
update site, white
42 files changed, 774 insertions, 508 deletions
diff --git a/TODO.md b/TODO.md new file mode 100644 index 00000000..90de6790 --- /dev/null +++ b/TODO.md @@ -0,0 +1,14 @@ +# TODO + +## CSS + +- change font size in Tabulator to 12px (can't find where to edit it) + + +## Charts, JS + +- can we make the age/gender all in one include? +- can we auto-add download links to age/gender csv? +- can the pie chart labels keep same order as in CSV? + + diff --git a/client/chart/chart.css b/client/chart/chart.css index f9c33247..2df5a97a 100644 --- a/client/chart/chart.css +++ b/client/chart/chart.css @@ -1,22 +1,24 @@ .chart text { - fill: white; + fill: black; } .chart line { - stroke: white; + stroke: black; } .chart path { - stroke: white; + stroke: black; } .c3 path, .c3 line { - stroke: white; + stroke: black; } .c3-tooltip, .c3-tooltip td { background: rgba(0,0,0,0.8); + color: #fff; } .c3-tooltip th { font-family: 'Roboto', sans-serif; - background: black; -} + background: rgba(255,255,255,0.0); + /*background: black;*/ +}
\ No newline at end of file diff --git a/client/modalImage/modal.css b/client/modalImage/modal.css index d628cc48..cc9a1f32 100644 --- a/client/modalImage/modal.css +++ b/client/modalImage/modal.css @@ -31,7 +31,7 @@ .modal .caption { display: block; text-align: center; - background: black; + /*background: black;*/ padding: 10px; } .modal .prev span, diff --git a/client/table/tabulator.css b/client/table/tabulator.css index 0ea81974..0d9e8ff1 100644 --- a/client/table/tabulator.css +++ b/client/table/tabulator.css @@ -1,6 +1,7 @@ .tabulator { border-left: 1px solid #333; border-bottom: 1px solid #333; + font-size: 12px; } .tabulator-row.tabulator-row-odd { background-color: #222; @@ -42,7 +43,7 @@ background-image: url(/assets/img/icon-search.png); background-position: 378px center; background-repeat: no-repeat; - box-shadow: 0px 2px 4px rgba(0,0,0,0.2); + box-shadow: 1px 2px 4px rgba(0,0,0,0.6); border: 0; } @@ -55,10 +56,10 @@ } .download { display: block; - font-size: 13px; - color: #ddd; + font-size: 12px; + color: #333; cursor: pointer; - background: #333; + background: #ddd; padding: 5px 8px 5px 8px; border-radius: 5px; transition: all 0.2s; diff --git a/environment.yml b/environment.yml deleted file mode 100644 index b3d28c7e..00000000 --- a/environment.yml +++ /dev/null @@ -1,153 +0,0 @@ -name: megapixels -channels: - - pytorch - - conda-forge - - defaults -dependencies: - - atk=2.25.90=hb9dd440_1002 - - attrs=18.2.0=py_0 - - backcall=0.1.0=py_0 - - blas=1.0=mkl - - bleach=3.1.0=py_0 - - ca-certificates=2018.11.29=ha4d7672_0 - - cairo=1.16.0=ha4e643d_1000 - - certifi=2018.11.29=py36_1000 - - cffi=1.11.5=py36h9745a5d_1001 - - cudatoolkit=9.0=h13b8566_0 - - dbus=1.13.0=h4e0c4b3_1000 - - decorator=4.3.2=py_0 - - entrypoints=0.3=py36_1000 - - expat=2.2.5=hf484d3e_1002 - - fontconfig=2.13.1=h2176d3f_1000 - - freetype=2.9.1=h94bbf69_1005 - - gdk-pixbuf=2.36.12=h49783d7_1002 - - gettext=0.19.8.1=h9745a5d_1001 - - glib=2.58.2=hf63aee3_1001 - - gobject-introspection=1.58.2=py36h2da5eee_1000 - - graphite2=1.3.13=hf484d3e_1000 - - gstreamer=1.14.4=h66beb1c_1001 - - gtk2=2.24.31=hb68c50a_1001 - - harfbuzz=2.3.1=h6824563_0 - - icu=58.2=hf484d3e_1000 - - ipykernel=5.1.0=py36h24bf2e0_1002 - - ipython=7.2.0=py36h24bf2e0_1000 - - ipython_genutils=0.2.0=py_1 - - ipywidgets=7.4.2=py_0 - - jedi=0.13.2=py36_1000 - - jinja2=2.10=py_1 - - jpeg=9c=h14c3975_1001 - - jsonschema=3.0.0a3=py36_1000 - - jupyter=1.0.0=py_1 - - jupyter_client=5.2.4=py_1 - - jupyter_console=6.0.0=py_0 - - jupyter_core=4.4.0=py_0 - - libffi=3.2.1=hf484d3e_1005 - - libgcc-ng=7.3.0=hdf63c60_0 - - libgfortran-ng=7.2.0=hdf63c60_3 - - libiconv=1.15=h14c3975_1004 - - libpng=1.6.36=h84994c4_1000 - - libsodium=1.0.16=h14c3975_1001 - - libstdcxx-ng=7.3.0=hdf63c60_0 - - libtiff=4.0.10=h648cc4a_1001 - - libuuid=2.32.1=h14c3975_1000 - - libxcb=1.13=h14c3975_1002 - - libxml2=2.9.8=h143f9aa_1005 - - markupsafe=1.1.0=py36h14c3975_1000 - - mistune=0.8.4=py36h14c3975_1000 - - mkl_fft=1.0.10=py36h14c3975_1 - - mkl_random=1.0.2=py36h637b7d7_2 - - nb_conda=2.2.1=py36_0 - - nb_conda_kernels=2.2.0=py36_1000 - - nbconvert=5.3.1=py_1 - - nbformat=4.4.0=py_1 - - ncurses=6.1=hf484d3e_1002 - - ninja=1.9.0=h6bb024c_0 - - notebook=5.7.4=py36_1000 - - numpy-base=1.15.4=py36hde5b4d6_0 - - olefile=0.46=py_0 - - openssl=1.1.1a=h14c3975_1000 - - pandoc=2.6=1 - - pandocfilters=1.4.2=py_1 - - pango=1.40.14=h4ea9474_1004 - - parso=0.3.3=py_0 - - pcre=8.41=hf484d3e_1003 - - pexpect=4.6.0=py36_1000 - - pickleshare=0.7.5=py36_1000 - - pip=19.0.2=py36_0 - - 
pixman=0.34.0=h14c3975_1003 - - prometheus_client=0.5.0=py_0 - - prompt_toolkit=2.0.8=py_0 - - pthread-stubs=0.4=h14c3975_1001 - - ptyprocess=0.6.0=py36_1000 - - pycparser=2.19=py_0 - - pygments=2.3.1=py_0 - - pyqt=4.11.4=py36_3 - - pyrsistent=0.14.10=py36h14c3975_0 - - python=3.6.8=h0371630_0 - - python-dateutil=2.8.0=py_0 - - pytorch=1.0.1=py3.6_cuda9.0.176_cudnn7.4.2_2 - - pyzmq=17.1.2=py36h6afc9c9_1001 - - qt=4.8.7=2 - - qtconsole=4.4.3=py_0 - - readline=7.0=hf8c457e_1001 - - send2trash=1.5.0=py_0 - - setuptools=40.7.3=py36_0 - - sip=4.18=py36_1 - - six=1.12.0=py36_1000 - - sqlite=3.26.0=h67949de_1000 - - terminado=0.8.1=py36_1001 - - testpath=0.4.2=py36_1000 - - tk=8.6.9=h84994c4_1000 - - torchvision=0.2.1=py_2 - - tornado=5.1.1=py36h14c3975_1000 - - traitlets=4.3.2=py36_1000 - - wcwidth=0.1.7=py_1 - - webencodings=0.5.1=py_1 - - wheel=0.32.3=py36_0 - - widgetsnbextension=3.4.2=py36_1000 - - xorg-kbproto=1.0.7=h14c3975_1002 - - xorg-libice=1.0.9=h14c3975_1004 - - xorg-libsm=1.2.3=h4937e3b_1000 - - xorg-libx11=1.6.7=h14c3975_1000 - - xorg-libxau=1.0.8=h14c3975_1006 - - xorg-libxdmcp=1.1.2=h14c3975_1007 - - xorg-libxext=1.3.3=h14c3975_1004 - - xorg-libxrender=0.9.10=h14c3975_1002 - - xorg-libxt=1.1.5=h14c3975_1002 - - xorg-renderproto=0.11.1=h14c3975_1002 - - xorg-xextproto=7.3.0=h14c3975_1002 - - xorg-xproto=7.0.31=h14c3975_1007 - - xz=5.2.4=h14c3975_1001 - - zeromq=4.2.5=hf484d3e_1006 - - zlib=1.2.11=h14c3975_1004 - - pip: - - click==7.0 - - cloudpickle==0.7.0 - - cmake==3.13.3 - - colorlog==4.0.2 - - cycler==0.10.0 - - dask==1.1.1 - - dlib==19.16.0 - - imagehash==4.0 - - imutils==0.5.2 - - intel-openmp==2019.0 - - kiwisolver==1.0.1 - - matplotlib==3.0.2 - - mkl==2019.0 - - networkx==2.2 - - numpy==1.16.1 - - opencv-python==4.0.0.21 - - pandas==0.24.1 - - pillow==5.4.1 - - pymediainfo==3.0 - - pyparsing==2.3.1 - - python-dotenv==0.10.1 - - pytz==2018.9 - - pywavelets==1.0.1 - - scikit-image==0.14.2 - - scikit-learn==0.20.2 - - scipy==1.2.1 - - toolz==0.9.0 - - tqdm==4.31.0 -prefix: /home/adam/anaconda3/envs/megapixels - diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py index 8ecc8971..c840ea1b 100644 --- a/megapixels/app/models/bbox.py +++ b/megapixels/app/models/bbox.py @@ -207,11 +207,21 @@ class BBox: # ----------------------------------------------------------------- # Convert to - def to_square(self, bounds): + def to_square(self): '''Forces bbox to square dimensions - :param bounds: (int, int) w, h of the image :returns (BBox) in square ratio ''' + if self._width > self._height: + delta = (self._width - self._height) / 2 + self._y1 -= delta + self._y2 += delta + elif self._height > self._width: + delta = (self._height - self._width) / 2 + self._x1 -= delta + self._x2 += delta + return BBox(self._x1, self._y1, self._x2, self._y2) + + def to_dim(self, dim): """scale is (w, h) is tuple of dimensions""" diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py index a7227a70..c908da1b 100644 --- a/megapixels/app/models/dataset.py +++ b/megapixels/app/models/dataset.py @@ -152,6 +152,8 @@ class Dataset: image_records = [] # list of image matches w/identity if available # find most similar feature vectors indexes #match_idxs = self.similar(query_vec, n_results, threshold) + + # TODO: add cosine similarity sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1) match_idxs = np.argpartition(sim_scores, range(n_results))[:n_results] @@ -180,7 +182,17 @@ class Dataset: s3_url = 
self.data_store_s3.face(ds_record.uuid) bbox_norm = BBox.from_xywh_norm_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h, dim) self.log.debug(f'bbox_norm: {bbox_norm}') - score = sim_scores[match_idx] + self.log.debug(f'match_idx: {match_idx}, record_idx: {record_idx}, roi_index: {roi_index}, len sim_scores: {len(sim_scores)}') + try: + score = sim_scores[match_idx] + except Exception as e: + self.log.error(e) + try: + score = sim_scores[record_idx] + except Exception as e: + self.log.error(e) + + if types.Metadata.IDENTITY in self._metadata.keys(): ds_id = df_identity.loc[df_identity['identity_key'] == ds_record.identity_key].iloc[0] diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index 3700efd1..6ab8c700 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -163,6 +163,35 @@ def intro_section(metadata, s3_path): """ section = "<section class='intro_section' style='background-image: url({})'>".format(s3_path + metadata['image']) + # section += "<div class='inner'>" + + # parts = [] + # if 'desc' in metadata: + # desc = metadata['desc'] + # # colorize the first instance of the database name in the header + # if 'color' in metadata and metadata['title'] in desc: + # desc = desc.replace(metadata['title'], "<span style='color: {}'>{}</span>".format(metadata['color'], metadata['title']), 1) + # section += "<div class='hero_desc'><span class='bgpad'>{}</span></div>".format(desc, desc) + + # if 'subdesc' in metadata: + # subdesc = markdown(metadata['subdesc']).replace('<p>', '').replace('</p>', '') + # section += "<div class='hero_subdesc'><span class='bgpad'>{}</span></div>".format(subdesc, subdesc) + + # section += "</div>" + section += "</section>" + + if 'caption' in metadata: + section += "<section><div class='image'><div class='intro-caption caption'>{}</div></div></section>".format(metadata['caption']) + + return section + + +def intro_section_v1(metadata, s3_path): + """ + Build the intro section for datasets + """ + + section = "<section class='intro_section' style='background-image: url({})'>".format(s3_path + metadata['image']) section += "<div class='inner'>" parts = [] @@ -185,7 +214,6 @@ def intro_section(metadata, s3_path): return section - def fix_images(lines, s3_path): """ do our own transformation of the markdown around images to handle wide images etc diff --git a/megapixels/app/utils/draw_utils.py b/megapixels/app/utils/draw_utils.py index 7044a62f..1836768b 100644 --- a/megapixels/app/utils/draw_utils.py +++ b/megapixels/app/utils/draw_utils.py @@ -3,8 +3,10 @@ from math import sqrt import numpy as np import cv2 as cv +import PIL +from PIL import ImageDraw -from app.utils import logger_utils +from app.utils import logger_utils, im_utils log = logger_utils.Logger.getLogger() @@ -118,6 +120,22 @@ def draw_landmarks2D(im, points_norm, radius=3, color=(0,255,0)): cv.circle(im_dst, pt, radius, color, -1, cv.LINE_AA) return im_dst +def draw_landmarks2D_pil(im, points_norm, radius=3, color=(0,255,0)): + '''Draws facial landmarks, either 5pt or 68pt + ''' + im_pil = im_utils.ensure_pil(im_utils.bgr2rgb(im)) + draw = ImageDraw.Draw(im_pil) + dim = im.shape[:2][::-1] + for x,y in points_norm: + x1, y1 = (int(x*dim[0]), int(y*dim[1])) + xyxy = (x1, y1, x1+radius, y1+radius) + draw.ellipse(xyxy, fill='white') + del draw + im_dst = im_utils.ensure_np(im_pil) + im_dst = im_utils.rgb2bgr(im_dst) + return im_dst + + def draw_landmarks3D(im, points, radius=3, color=(0,255,0)): '''Draws 3D facial landmarks ''' @@ -126,12 +144,26 @@ def 
draw_landmarks3D(im, points, radius=3, color=(0,255,0)): cv.circle(im_dst, (x,y), radius, color, -1, cv.LINE_AA) return im_dst -def draw_bbox(im, bbox_norm, color=(0,255,0), stroke_weight=2): +def draw_bbox(im, bboxes_norm, color=(0,255,0), stroke_weight=2): '''Draws BBox onto cv image + :param color: RGB value ''' - im_dst = im.copy() - bbox_dim = bbox_norm.to_dim(im.shape[:2][::-1]) - cv.rectangle(im_dst, bbox_dim.pt_tl, bbox_dim.pt_br, color, stroke_weight, cv.LINE_AA) + #im_dst = im.copy() + if not type(bboxes_norm) == list: + bboxes_norm = [bboxes_norm] + + im_pil = im_utils.ensure_pil(im_utils.bgr2rgb(im)) + im_pil_draw = ImageDraw.ImageDraw(im_pil) + + for bbox_norm in bboxes_norm: + bbox_dim = bbox_norm.to_dim(im.shape[:2][::-1]) + #cv.rectangle(im_dst, bbox_dim.pt_tl, bbox_dim.pt_br, color, stroke_weight, cv.LINE_AA) + xyxy = (bbox_dim.pt_tl, bbox_dim.pt_br) + im_pil_draw.rectangle(xyxy, outline=color, width=stroke_weight) + # draw.rectangle([x1, y1, x2, y2], outline=, width=3) + im_dst = im_utils.ensure_np(im_pil) + im_dst = im_utils.rgb2bgr(im_dst) + del im_pil_draw return im_dst def draw_pose(im, pt_nose, image_pts): diff --git a/megapixels/app/utils/im_utils.py b/megapixels/app/utils/im_utils.py index d36c1c32..670d5168 100644 --- a/megapixels/app/utils/im_utils.py +++ b/megapixels/app/utils/im_utils.py @@ -11,11 +11,6 @@ from skimage import feature import imutils import time import numpy as np -import torch -import torch.nn as nn -import torchvision.models as models -import torchvision.transforms as transforms -from torch.autograd import Variable from sklearn.metrics.pairwise import cosine_similarity import datetime @@ -293,6 +288,13 @@ def bgr2rgb(im): """ return cv.cvtColor(im,cv.COLOR_BGR2RGB) +def rgb2bgr(im): + """Wrapper for cv2.cvtColor transform + :param im: Numpy.ndarray (BGR) + :returns: Numpy.ndarray (RGB) + """ + return cv.cvtColor(im,cv.COLOR_RGB2BGR) + def compute_laplacian(im): # below 100 is usually blurry return cv.Laplacian(im, cv.CV_64F).var() @@ -329,7 +331,7 @@ def normalizedGraylevelVariance(img): s = stdev[0]**2 / mean[0] return s[0] -def compute_if_blank(im,width=100,sigma=0,thresh_canny=.1,thresh_mean=4,mask=None): +def is_blank(im,width=100,sigma=0,thresh_canny=.1,thresh_mean=4,mask=None): # im is graysacale np #im = imutils.resize(im,width=width) #mask = imutils.resize(mask,width=width) diff --git a/megapixels/commands/datasets/megaface_age_from_orig.py b/megapixels/commands/datasets/megaface_age_from_orig.py new file mode 100644 index 00000000..489bebf3 --- /dev/null +++ b/megapixels/commands/datasets/megaface_age_from_orig.py @@ -0,0 +1,62 @@ +import click + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', required=True, + help='Input path to metadata directory') +@click.option('-o', '--output', 'opt_fp_out', + help='Output path to age CSV') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out): + """Creates CSV of MegaFace ages from original BBoxes""" + + import os + from os.path import join + from pathlib import Path + from glob import glob + + import dlib + import pandas as pd + from tqdm import tqdm + + from app.settings import types + from app.utils import click_utils + from app.settings import app_cfg + + from PIL import Image, ImageOps, ImageFilter + from app.utils import file_utils, im_utils, logger_utils + + log = logger_utils.Logger.getLogger() + + # ------------------------------------------------- + # process + fp_im_dirs = glob(join(opt_fp_in, '**/'), recursive=True) + + log.info('Found {} 
directories'.format(len(fp_im_dirs))) + + identities = {} + + for fp_im_dir in tqdm(fp_im_dirs): + # 1234567@N05_identity_1 + try: + dir_id_name = Path(fp_im_dir).name + nsid = dir_id_name.split('_')[0] + identity_num = dir_id_name.split('_')[2] + id_key = '{}_{}'.format(nsid, identity_num) + num_images = len(glob(join(fp_im_dir, '*.jpg'))) + if not id_key in identities.keys(): + identities[id_key] = {'nsid': nsid, 'identity': identity_num, 'images': num_images} + else: + identities[id_key]['images'] += num_images + except Exception as e: + continue + + # convert to dict + identities_list = [v for k, v in identities.items()] + df = pd.DataFrame.from_dict(identities_list) + + file_utils.mkdirs(opt_fp_out) + + log.info('Wrote {} lines to {}'.format(len(df), opt_fp_out)) + df.to_csv(opt_fp_out, index=False) + + diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py index 4c7036f4..5218d501 100644 --- a/megapixels/commands/demo/face_search.py +++ b/megapixels/commands/demo/face_search.py @@ -10,7 +10,7 @@ log = Logger.getLogger() @click.command() @click.option('-i', '--input', 'opt_fp_in', required=True, - help='File to lookup') + help='Face image file to lookup') @click.option('--data_store', 'opt_data_store', type=cfg.DataStoreVar, default=click_utils.get_default(types.DataStore.HDD), diff --git a/megapixels/commands/processor/_old_files_to_face_rois.py b/megapixels/commands/processor/_old_files_to_face_rois.py index 895f4718..d92cbd74 100644 --- a/megapixels/commands/processor/_old_files_to_face_rois.py +++ b/megapixels/commands/processor/_old_files_to_face_rois.py @@ -1,4 +1,4 @@ - """ +""" Crop images to prepare for training """ diff --git a/megapixels/commands/processor/face_roi_from_annos.py b/megapixels/commands/processor/face_roi_from_annos.py new file mode 100644 index 00000000..fc933049 --- /dev/null +++ b/megapixels/commands/processor/face_roi_from_annos.py @@ -0,0 +1,187 @@ +""" +Crop images to prepare for training +""" + +import click +# from PIL import Image, ImageOps, ImageFilter, ImageDraw + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +color_filters = {'color': 1, 'gray': 2, 'all': 3} + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--size', 'opt_size', + type=(int, int), default=(480, 480), + help='Output image size') +@click.option('-d', '--detector', 'opt_detector_type', + type=cfg.FaceDetectNetVar, + default=click_utils.get_default(types.FaceDetectNet.CVDNN), + help=click_utils.show_help(types.FaceDetectNet)) +@click.option('-g', '--gpu', 'opt_gpu', default=0, + help='GPU index') +@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1), + help='Confidence minimum threshold') +@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4), + help='Number pyramids to upscale for DLIB detectors') 
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, + help='Display detections to debug') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('--color', 'opt_color_filter', + type=click.Choice(color_filters.keys()), default='color', + help='Filter to keep color or grayscale images (color = keep color') +@click.option('--keep', 'opt_largest', type=click.Choice(['largest', 'all']), default='largest', + help='Only keep largest face') +@click.option('--zone', 'opt_zone', default=(0.0, 0.0), type=(float, float), + help='Face center must be located within zone region (0.5 = half width/height)') +@click.pass_context +def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type, + opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter, + opt_largest, opt_zone): + """Converts frames with faces to CSV of ROIs""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import dlib # must keep a local reference for dlib + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.processors import face_detector + from app.models.data_store import DataStore + + # ------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + + # set data_store + data_store = DataStore(opt_data_store, opt_dataset) + + # get filepath out + fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out + if not opt_force and Path(fp_out).exists(): + log.error('File exists. 
Use "-f / --force" to overwite') + return + + # set detector + if opt_detector_type == types.FaceDetectNet.CVDNN: + detector = face_detector.DetectorCVDNN() + elif opt_detector_type == types.FaceDetectNet.DLIB_CNN: + detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) + elif opt_detector_type == types.FaceDetectNet.DLIB_HOG: + detector = face_detector.DetectorDLIBHOG() + elif opt_detector_type == types.FaceDetectNet.MTCNN_TF: + detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu) + elif opt_detector_type == types.FaceDetectNet.HAAR: + log.error('{} not yet implemented'.format(opt_detector_type.name)) + return + + + # get list of files to process + fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in + df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') + if opt_slice: + df_record = df_record[opt_slice[0]:opt_slice[1]] + log.debug('processing {:,} files'.format(len(df_record))) + + # filter out grayscale + color_filter = color_filters[opt_color_filter] + # set largest flag, to keep all or only largest + opt_largest = (opt_largest == 'largest') + + data = [] + skipped_files = [] + processed_files = [] + + for df_record in tqdm(df_record.itertuples(), total=len(df_record)): + fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext)) + try: + im = cv.imread(fp_im) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + except Exception as e: + log.debug(f'could not read: {fp_im}') + return + # filter out color or grayscale iamges + if color_filter != color_filters['all']: + try: + is_gray = im_utils.is_grayscale(im) + if is_gray and color_filter != color_filters['gray']: + log.debug('Skipping grayscale image: {}'.format(fp_im)) + continue + except Exception as e: + log.error('Could not check grayscale: {}'.format(fp_im)) + continue + + try: + bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest, + zone=opt_zone, conf_thresh=opt_conf_thresh) + except Exception as e: + log.error('could not detect: {}'.format(fp_im)) + log.error('{}'.format(e)) + continue + + if len(bboxes_norm) == 0: + skipped_files.append(fp_im) + log.warn(f'no faces in: {fp_im}') + log.warn(f'skipped: {len(skipped_files)}. found:{len(processed_files)} files') + else: + processed_files.append(fp_im) + for bbox in bboxes_norm: + roi = { + 'record_index': int(df_record.Index), + 'x': bbox.x, + 'y': bbox.y, + 'w': bbox.w, + 'h': bbox.h + } + data.append(roi) + + # if display optined + if opt_display and len(bboxes_norm): + # draw each box + for bbox_norm in bboxes_norm: + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if dim[0] > 1000: + im_resized = im_utils.resize(im_resized, width=1000) + im_resized = draw_utils.draw_bbox(im_resized, bbox_norm) + + # display and wait + cv.imshow('', im_resized) + display_utils.handle_keyboard() + + # create DataFrame and save to CSV + file_utils.mkdirs(fp_out) + df = pd.DataFrame.from_dict(data) + df.index.name = 'index' + df.to_csv(fp_out) + + # save script + file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file diff --git a/megapixels/commands/datasets/file_record.py b/megapixels/commands/processor/file_record.py index 41a5df28..6403c768 100644 --- a/megapixels/commands/datasets/file_record.py +++ b/megapixels/commands/processor/file_record.py @@ -78,7 +78,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, fp_out = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_out is None else opt_fp_out # exit if exists if not opt_force and Path(fp_out).exists(): - log.error('File exists. Use "-f / --force" to overwite') + log.error(f'File {fp_out} exists. Use "-f / --force" to overwite') return # ---------------------------------------------------------------- diff --git a/megapixels/commands/site/age_gender_to_site.py b/megapixels/commands/site/age_gender_to_site.py new file mode 100644 index 00000000..3ad24a8d --- /dev/null +++ b/megapixels/commands/site/age_gender_to_site.py @@ -0,0 +1,100 @@ +""" + +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_force): + """Converts age/gender to CSV for pie chartgs""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils + from app.models.data_store import DataStore + + # ------------------------------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + + # init filepaths + data_store = DataStore(opt_data_store, opt_dataset) + # set file output path + metadata_type = types.Metadata.FACE_ATTRIBUTES + fp_in = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_in + dk = opt_dataset.name.lower() + log.debug(f'dk: {dk}') + fp_out_age = f'../site/content/pages/datasets/{dk}/assets/age.csv' + fp_out_gender = f'../site/content/pages/datasets/{dk}/assets/gender.csv' + + if not opt_force and (Path(fp_out_age).exists() or Path(fp_out_gender).exists()): + log.error('File exists. 
Use "-f / --force" to overwite') + return + + # ------------------------------------------------------------------------- + # Age + + df = pd.read_csv(fp_in) + + results = [] + brackets = [(0, 12), (13, 18), (19,24), (25, 34), (35, 44), (45, 54), (55, 64), (64, 75), (75, 100)] + df_age = df['age_real'] + + for a1, a2 in brackets: + n = len(df_age.loc[((df_age >= a1) & (df_age <= a2))]) + results.append({'age': f'{a1} - {a2}', 'faces': n}) + + df_out = pd.DataFrame.from_dict(results) + df_out = df_out[['age','faces']] + df_out.to_csv(fp_out_age, index=False) + + # Gender + results = [] + + df_f = df['f'] + nm = len(df_f.loc[((df_f < 0.33))]) + nnb = len(df_f.loc[((df_f >= 0.33) & (df_f <= 0.66))]) + nf = len(df_f.loc[((df_f > 0.66))]) + + results = [] + results.append({'gender': 'Male', 'faces':nm}) + results.append({'gender': 'Female', 'faces': nf}) + results.append({'gender': 'They', 'faces': nnb}) + + df_out = pd.DataFrame.from_dict(results) + df_out = df_out[['gender','faces']] + df_out.to_csv(fp_out_gender, index=False)
\ No newline at end of file diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index daf36a19..245643f1 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -187,6 +187,7 @@ .tabulator { font-family: 'Roboto', sans-serif; + font-size:10px; } .tabulator-row { transition: background-color 100ms cubic-bezier(0,0,1,1); @@ -247,7 +248,7 @@ stroke: rgba(64,64,64,0.3); } .chartCaption { - color: #888; + color: #333; font-size: 12px; font-family: 'Roboto', sans-serif; font-weight: 400; diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 6b1f40cd..75f1ad3f 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -12,11 +12,11 @@ html, body { min-height: 100%; /*font-family: 'Roboto Mono', sans-serif;*/ font-family: 'Roboto', sans-serif; - color: #eee; + color: #000; overflow-x: hidden; } html { - background: #181818; + background: #fff; } a { outline: none; } img { border: 0; } @@ -33,6 +33,7 @@ html.mobile .content{ } /* header */ +/* header */ header { position: fixed; @@ -155,7 +156,7 @@ footer { display: flex; flex-direction: row; justify-content: space-between; - color: #666; + color: #000; font-size: 13px; /*line-height: 17px;*/ padding: 15px; @@ -211,30 +212,34 @@ footer ul:last-child li { /* headings */ h1 { - color: #eee; - font-weight: 400; - font-size: 34pt; + color: #000; + font-weight: 500; + font-size: 30pt; margin: 20px auto 10px auto; padding: 0; transition: color 0.1s cubic-bezier(0,0,1,1); font-family: 'Roboto Mono', monospace; + text-transform: uppercase; } h2 { - color: #eee; - font-weight: 400; + color: #111; + font-weight: 500; font-size: 34px; line-height: 43px; margin: 20px auto 20px auto; padding: 0; transition: color 0.1s cubic-bezier(0,0,1,1); font-family: 'Roboto Mono', monospace; + text-transform: uppercase; } h3 { + color: #333; margin: 20px auto 10px auto; font-size: 28px; font-weight: 400; transition: color 0.1s cubic-bezier(0,0,1,1); font-family: 'Roboto Mono', monospace; + text-transform: uppercase; } h4 { margin: 6px auto 10px auto; @@ -243,6 +248,7 @@ h4 { font-weight: 400; transition: color 0.1s cubic-bezier(0,0,1,1); font-family: 'Roboto Mono', monospace; + text-transform: uppercase; } h5 { margin: 6px auto 10px auto; @@ -253,11 +259,11 @@ h5 { font-family: 'Roboto Mono', monospace; } .content h3 a { - color: #888; + color: #333; text-decoration: none; } .desktop .content h3 a:hover { - color: #fff; + color: #111; text-decoration: underline; } .right-sidebar h3 { @@ -272,12 +278,15 @@ h5 { .right-sidebar ul li a { border-bottom: 0; } +.right-sidebar ul li:last-child{ + border-bottom: 0; +} th, .gray { font-family: 'Roboto', monospace; font-weight: 500; text-transform: uppercase; letter-spacing: .15rem; - color: #777; + color: #333; } th, .gray { font-size: 9pt; @@ -354,10 +363,10 @@ section { } section p { margin: 10px auto 20px auto; - line-height: 1.9rem; - font-size: 17px; + line-height: 1.95rem; + font-size: 16px; font-weight: 400; - color: #cdcdcd; + color: #111; } section ul { margin: 10px auto 20px auto; @@ -367,22 +376,32 @@ section h1, section h2, section h3, section h4, section h5, section h6, section max-width: 720px; } -.content-dataset section:nth-child(2) p:first-child{ - font-size:19px; +.content-dataset-list section:nth-child(1) p:nth-child(2){ + font-size:22px; + line-height:34px; +} +.content-dataset section:nth-child(4) p:nth-child(2){ + font-size:20px; + line-height: 32px; + color:#000; +} +.content-dataset section:nth-child(3) p:nth-child(2) { + /* highlight news 
text */ + color:#f00; } p.subp{ font-size: 14px; } .content a { - color: #dedede; + color: #333; text-decoration: none; - border-bottom: 2px solid #666; + border-bottom: 1px solid #333; padding-bottom: 1px; transition: color 0.1s cubic-bezier(0,0,1,1); } .desktop .content a:hover { - color: #fff; - border-bottom: 2px solid #ccc; + color: #111; + border-bottom: 1px solid #111; } /* top of post metadata */ @@ -393,7 +412,7 @@ p.subp{ justify-content: flex-start; align-items: flex-start; font-size: 12px; - color: #ccc; + color: #111; margin-bottom: 20px; font-family: 'Roboto', sans-serif; margin-right: 20px; @@ -412,7 +431,6 @@ p.subp{ float: right; width: 200px; margin: 0px 20px 20px 20px; - padding-top: 12px; padding-left: 20px; border-left: 1px solid #333; font-family: 'Roboto'; @@ -442,7 +460,10 @@ p.subp{ border-bottom: 1px solid #333; padding:10px 10px 10px 0; margin: 0 4px 4px 0; - color: #bbb; + color: #111; +} +.right-sidebar .meta:last-child{ + border-bottom: 0; } .right-sidebar ul { margin-bottom: 10px; @@ -477,7 +498,7 @@ ul { } ul li { margin-bottom: 8px; - color: #dedede; + color: #333; font-weight: 400; font-size: 14px; } @@ -497,8 +518,9 @@ pre { border-radius: 2px; padding: 10px; display: block; - background: #333; + background: #ddd; overflow: auto + /*margin-bottom: 10px;*/ } pre code { display: block; @@ -533,10 +555,10 @@ table tr td{ font-size:12px; } table tbody tr:nth-child(odd){ - background-color:#292929; + background-color:#ebebeb; } table tbody tr:nth-child(even){ - background-color:#333; + background-color:#ccc; } hr { @@ -670,22 +692,24 @@ section.fullwidth .image { } .image .caption.intro-caption{ text-align: center; + color:#666; } .caption { text-align: center; font-size: 10pt; - color: #999; + line-height: 14pt; + color: #555; max-width: 960px; margin: 10px auto 10px auto; font-family: 'Roboto'; } .caption a { - color: #ccc; - border: 0; + color: #333; + border-bottom: 1px solid #333; } .desktop .caption a:hover { - color: #fff; - border: 0; + color: #111; + border-bottom: 1px solid #111; } @@ -873,7 +897,7 @@ section.fullwidth .image { .dataset-list .dataset { width: 300px; padding: 12px; - color: white; + color: #000; font-weight: 400; font-family: 'Roboto'; position: relative; @@ -884,21 +908,22 @@ section.fullwidth .image { height: 178px; } .desktop .content .dataset-list a { - border: 1px solid #333; + border: 1px solid #999; } .desktop .dataset-list a:hover { - border: 1px solid #666; + border: 1px solid #000; } .dataset-list .fields { font-size: 12px; - color: #ccc; + line-height: 17px; + color: #333; } .dataset-list .dataset .title{ font-size: 16px; line-height: 20px; margin-bottom: 4px; - font-weight: 400; + font-weight: 500; display: block; } .dataset-list .fields div { @@ -965,7 +990,7 @@ section.intro_section { justify-content: center; align-items: center; background-color: #111111; - margin-bottom: 20px; + /*margin-bottom: 20px;*/ padding: 0; } .intro_section .inner { @@ -1091,7 +1116,8 @@ ul.map-legend li:before { } ul.map-legend li.active { text-decoration: underline; - color: #fff; + color: #000; + font-weight: 500; } ul.map-legend li.edu:before { background-color: #f2f293; @@ -1118,7 +1144,7 @@ ul.map-legend li.source:before { } .content-about { - color: #fff; + /*color: #fff;*/ } .content-about p { font-size: 16px; @@ -1141,12 +1167,13 @@ ul.map-legend li.source:before { } .content-about .about-menu ul li a { border-bottom: 0; - color: #aaa; + color: #555; } .content-about .about-menu ul li a.current { - border-bottom: 1px solid #ddd; - 
color: #ddd; + border-bottom: 1px solid #000; + color: #000; + font-weight: 500; } /* columns */ @@ -1237,7 +1264,7 @@ a.footnote { /*display: inline-block;*/ bottom: 7px; text-decoration: none; - color: #ff8; + color: #666; border: 0; left: -1px; transition-duration: 0s; @@ -1255,7 +1282,7 @@ a.footnote_shim { } .desktop a.footnote:hover { /*background-color: #ff8;*/ - color: #fff; + color: #000; border: 0; } .backlinks { diff --git a/site/assets/css/tabulator.css b/site/assets/css/tabulator.css index d7a3fab3..baf44536 100755 --- a/site/assets/css/tabulator.css +++ b/site/assets/css/tabulator.css @@ -1,7 +1,7 @@ /* Tabulator v4.1.3 (c) Oliver Folkerd */ .tabulator { position: relative; - font-size: 13px; + font-size: 12px; text-align: left; overflow: hidden; -ms-transform: translatez(0); diff --git a/site/includes/age_gender_disclaimer.html b/site/includes/age_gender_disclaimer.html new file mode 100644 index 00000000..f8dceb62 --- /dev/null +++ b/site/includes/age_gender_disclaimer.html @@ -0,0 +1,3 @@ +<section> + <p>Age and gender estimation distribution were calculated by anlayzing all faces in the dataset images. This may include additional faces appearing next to an annotated face, or this may skip false faces that were erroneously included as part of the original dataset. These numbers are provided as an estimation and not a factual representation of the exact gender and age of all faces.</p> +</section>
\ No newline at end of file diff --git a/site/includes/chart.html b/site/includes/chart.html deleted file mode 100644 index 01c2e83b..00000000 --- a/site/includes/chart.html +++ /dev/null @@ -1,14 +0,0 @@ -<section> - <h3>Who used {{ metadata.meta.dataset.name_display }}?</h3> - - <p> - This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. - </p> - - </section> - -<section class="applet_container"> -<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span> -</div> --> - <div class="applet" data-payload="{"command": "chart"}"></div> -</section> diff --git a/site/includes/dashboard.html b/site/includes/dashboard.html index d5e5693d..02d054b5 100644 --- a/site/includes/dashboard.html +++ b/site/includes/dashboard.html @@ -19,10 +19,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how {{ metadata.meta.dataset.name_display }} has been used around the world by commercial, military, and academic organizations; existing publicly available research citing {{ metadata.meta.dataset.name_full }} was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how {{ metadata.meta.dataset.name_display }} has been used around the world by commercial, military, and academic organizations; existing publicly available research citing {{ metadata.meta.dataset.name_full }} was collected, verified, and geocoded to show how AI training data has proliferated around the world. Click on the markers to reveal research projects at that location. </p> </section> diff --git a/site/includes/map.html b/site/includes/map.html deleted file mode 100644 index 372bed8d..00000000 --- a/site/includes/map.html +++ /dev/null @@ -1,22 +0,0 @@ -<section> - - <h3>Information Supply Chain</h3> - - <p> - To help understand how {{ metadata.meta.dataset.name_display }} has been used around the world by commercial, military, and academic organizations; existing publicly available research citing {{ metadata.meta.dataset.name_full }} was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the location markers to reveal research projects at that location. - </p> - - </section> - -<section class="applet_container fullwidth"> - <div class="applet" data-payload="{"command": "map"}"></div> -</section> - -<div class="caption"> - <ul class="map-legend"> - <li class="edu">Academic</li> - <li class="com">Commercial</li> - <li class="gov">Military / Government</li> - </ul> - <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> and then dataset usage verified and geolocated.</div > -</div>
\ No newline at end of file diff --git a/site/public/about/index.html b/site/public/about/index.html index ce2b6228..427a97a2 100644 --- a/site/public/about/index.html +++ b/site/public/about/index.html @@ -63,22 +63,9 @@ <li><a href="/about/attribution/">Attribution</a></li> <li><a href="/about/legal/">Legal / Privacy</a></li> </ul> -</section><p>MegaPixels is an independent art and research project by Adam Harvey and Jules LaPlace that investigates the ethics, origins, and individual privacy implications of face recognition image datasets and their role in the expansion of biometric surveillance technologies.</p> +</section><p>MegaPixels is an independent art and research project by <a href="https://ahprojects.com">Adam Harvey</a> and <a href="https://asdf.us">Jules LaPlace</a> that investigates the ethics, origins, and individual privacy implications of face recognition image datasets and their role in the expansion of biometric surveillance technologies.</p> <p>MegaPixels is made possible with support from <a href="http://mozilla.org">Mozilla</a></p> -<div class="flex-container team-photos-container"> - <div class="team-member"> - <h3>Adam Harvey</h3> - <p>is Berlin-based American artist and researcher. His previous projects (<a href="https://cvdazzle.com">CV Dazzle</a>, <a href="https://ahprojects.com/stealth-wear">Stealth Wear</a>, and <a href="https://github.com/adamhrv/skylift">SkyLift</a>) explore the potential for counter-surveillance as artwork. He is the founder of <a href="https://vframe.io">VFRAME</a> (visual forensics software for human rights groups) and is a currently researcher in residence at Karlsruhe HfG.</p> - <p><a href="https://ahprojects.com">ahprojects.com</a></p> - </p> - </div> - <div class="team-member"> - <h3>Jules LaPlace</h3> - <p>is an American technologist and artist also based in Berlin. He was previously the CTO of a digital agency in NYC and now also works at VFRAME, developing computer vision and data analysis software for human rights groups. Jules also builds experimental software for artists and musicians. - </p> - <p><a href="https://asdf.us/">asdf.us</a></p> - </div> -</div><p>MegaPixels is an art and research project first launched in 2017 for an <a href="https://ahprojects.com/megapixels-glassroom/">installation</a> at Tactical Technology Collective's <a href="https://tacticaltech.org/pages/glass-room-london-press/">GlassRoom</a> about face recognition datasets. In 2018 MegaPixels was extended to cover pedestrian analysis datasets for a <a href="https://esc.mur.at/de/node/2370">commission by Elevate Arts festival</a> in Austria. Since then MegaPixels has evolved into a large-scale interrogation of hundreds of publicly-available face and person analysis datasets, the first of which launched on this site in April 2019.</p> +<p>MegaPixels is an art and research project first launched in 2017 for an <a href="https://ahprojects.com/megapixels-glassroom/">installation</a> at Tactical Technology Collective's <a href="https://tacticaltech.org/pages/glass-room-london-press/">GlassRoom</a> about face recognition datasets. In 2018 MegaPixels was extended to cover pedestrian analysis datasets for a <a href="https://esc.mur.at/de/node/2370">commission by Elevate Arts festival</a> in Austria. 
Since then MegaPixels has evolved into a large-scale interrogation of hundreds of publicly-available face and person analysis datasets, the first of which launched on this site in April 2019.</p> <p>MegaPixels aims to provide a critical perspective on machine learning image datasets, one that might otherwise escape academia and industry funded artificial intelligence think tanks that are often supported by the same technology companies who created many of the datasets presented on this site.</p> <p>MegaPixels is an independent project, designed as a public resource for educators, students, journalists, and researchers. Each dataset presented on this site undergoes a thorough review of its images, intent, and citations. MegaPixels is a website-first research project, with an academic publication to follow in fall 2019.</p> <p>A dataset of verified geocoded citations and dataset statistics will be published in Fall 2019 along with a research paper as part of a research fellowship for <a href="http://kim.hfg-karlsruhe.de/">KIM (Critical Artificial Intelligence) Karlsruhe HfG</a>.</p> @@ -90,18 +77,18 @@ <li>June 26, 2019: The Atlantic writes about image training datasets "in the wild" and research ethics: <a href="https://www.theatlantic.com/technology/archive/2019/06/universities-record-students-campuses-research/592537/">Universities Record Students on Campuses for Research</a> by Sidney Fussell</li> </ul> <p>Read more <a href="/about/news">news</a></p> -</section><section><div class='columns columns-3'><div class='column'><h5>Team</h5> +<h5>Team</h5> <ul> <li>Adam Harvey: Concept, research and analysis, design, computer vision</li> <li>Jules LaPlace: Information and systems architecture, data management, web applications</li> </ul> -</div><div class='column'><h5>Contributing Researchers</h5> +<h5>Contributing Researchers</h5> <ul> <li>Beth (aka Ms. Celeb)</li> <li>Berit Gilma</li> <li>Mathana Stender</li> </ul> -</div><div class='column'><h5>Code and Libraries</h5> +<h5>Code and Libraries</h5> <ul> <li><a href="https://semanticscholar.org">Semantic Scholar</a> for citation aggregation</li> <li>Leaflet.js for maps</li> @@ -109,7 +96,7 @@ <li>ThreeJS for 3D visualizations</li> <li>PDFMiner.Six and Pandas for research paper analysis</li> </ul> -</div></div></section><section><h5>Attribution</h5> +<h5>Attribution</h5> <p>If you use MegaPixels or any data derived from it for your work, please cite our original work as follows:</p> <pre> @online{megapixels, @@ -119,9 +106,7 @@ url = {https://megapixels.cc/}, urldate = {2019-04-18} } -</pre><h5>Contact</h5> -<p>Please direct questions, comments, or feedback to <a href="https://mastodon.social/@adamhrv">mastodon.social/@adamhrv</a> or contact via <a href="https://ahprojects.com/about">https://ahprojects.com/about</a></p> -</section> +</pre></section> </div> <footer> diff --git a/site/public/about/legal/index.html b/site/public/about/legal/index.html index 8beafeea..0beebd43 100644 --- a/site/public/about/legal/index.html +++ b/site/public/about/legal/index.html @@ -65,9 +65,9 @@ </ul> </section><p>MegaPixels.cc Terms and Privacy</p> <p>MegaPixels is an independent and academic art and research project about the origins and ethics of publicly available face analysis image datasets. 
By accessing MegaPixels (the <em>Service</em> or <em>Services</em>) you agree to the terms and conditions set forth below.</p> -<h2>Privacy</h2> +<h3>Privacy</h3> <p>The MegaPixels site has been designed to minimize the amount of network requests to 3rd party services and therefore prioritize the privacy of the viewer. This site does not use any local or external analytics programs to monitor site viewers. In fact, the only data collected are the necessary server logs used only for preventing misuse, which are deleted at short-term intervals.</p> -<h2>3rd Party Services</h2> +<h3>3rd Party Services</h3> <p>In order to provide certain features of the site, some 3rd party services are needed. Currently, the MegaPixels.cc site uses two 3rd party services: (1) Leaflet.js for the interactive map and (2) Digital Ocean Spaces as a content delivery network. Both services encrypt your requests to their server using HTTPS and neither service requires storing any cookies or authentication. However, both services will store files in your web browser's local cache (local storage) to improve loading performance. None of these local storage files are using for analytics, tracking, or any similar purpose.</p> <h3>Links To Other Web Sites</h3> <p>The MegaPixels.cc contains many links to 3rd party websites, especially in the list of citations that are provided for each dataset. This website has no control over and assumes no responsibility for the content, privacy policies, or practices of any third party web sites or services. You acknowledge and agree that megapixels.cc (and its creators) shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods or services available on or through any such web sites or services.</p> diff --git a/site/public/datasets/adience/index.html b/site/public/datasets/adience/index.html index b2aa2733..a03fb3c6 100644 --- a/site/public/datasets/adience/index.html +++ b/site/public/datasets/adience/index.html @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/adience/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Adience ...</span></div><div class='hero_subdesc'><span class='bgpad'>Adience ... -</span></div></div></section><section><h2>Adience</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/adience/assets/background.jpg)'></section><section><h2>Adience</h2> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2014</div> @@ -97,10 +96,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how Adience Benchmark Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Adience Benchmark was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. 
+ To help understand how Adience Benchmark Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Adience Benchmark was collected, verified, and geocoded to show the information supply chains of people appearing in the images. Click on the markers to reveal research projects at that location. </p> </section> diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html index 18600b6f..d715d163 100644 --- a/site/public/datasets/brainwash/index.html +++ b/site/public/datasets/brainwash/index.html @@ -55,8 +55,8 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Brainwash is a dataset of webcam images taken from the Brainwash Cafe in San Francisco</span></div><div class='hero_subdesc'><span class='bgpad'>It includes 11,917 images of "everyday life of a busy downtown cafe" and is used for training face and head detection algorithms -</span></div></div></section><section><h2>Brainwash Dataset</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/background.jpg)'></section><section><div class='image'><div class='intro-caption caption'>One of 11,917 images from the Brainwash dataset captured from the Brainwash Cafe in San Francisco</div></div></section><section><h1>Brainwash Dataset</h1> +<p><em>Update: In response to the publication of this report, the Brainwash dataset has been "removed from access at the request of the depositor."</em></p> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2015</div> @@ -78,7 +78,7 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='https://purl.stanford.edu/sx925dc9385' target='_blank' rel='nofollow noopener'>stanford.edu</a></div> - </div></div><p>Brainwash is a dataset of livecam images taken from San Francisco's Brainwash Cafe. It includes 11,917 images of "everyday life of a busy downtown cafe"<a class="footnote_shim" name="[^readme]_1"> </a><a href="#[^readme]" class="footnote" title="Footnote 1">1</a> captured at 100 second intervals throughout the day. The Brainwash dataset includes 3 full days of webcam images taken on October 27, November 13, and November 24 in 2014. According the author's <a href="https://www.semanticscholar.org/paper/End-to-End-People-Detection-in-Crowded-Scenes-Stewart-Andriluka/1bd1645a629f1b612960ab9bba276afd4cf7c666">research paper</a> introducing the dataset, the images were acquired with the help of Angelcam.com. <a class="footnote_shim" name="[^end_to_end]_1"> </a><a href="#[^end_to_end]" class="footnote" title="Footnote 2">2</a></p> + </div><div class='meta'><div class='gray'>Press coverage</div><div><a href="https://www.nytimes.com/2019/07/13/technology/">New York Times</a>, <a href="https://www.tijd.be/dossier/legrandinconnu/brainwash/10136670.html">De Tijd</a></div></div></div><p>Brainwash is a dataset of livecam images taken from San Francisco's Brainwash Cafe. It includes 11,917 images of "everyday life of a busy downtown cafe"<a class="footnote_shim" name="[^readme]_1"> </a><a href="#[^readme]" class="footnote" title="Footnote 1">1</a> captured at 100 second intervals throughout the day. 
The Brainwash dataset includes 3 full days of webcam images taken on October 27, November 13, and November 24 in 2014. According the author's <a href="https://www.semanticscholar.org/paper/End-to-End-People-Detection-in-Crowded-Scenes-Stewart-Andriluka/1bd1645a629f1b612960ab9bba276afd4cf7c666">research paper</a> introducing the dataset, the images were acquired with the help of Angelcam.com. <a class="footnote_shim" name="[^end_to_end]_1"> </a><a href="#[^end_to_end]" class="footnote" title="Footnote 2">2</a></p> <p>The Brainwash dataset is unique because it uses images from a publicly available webcam that records people inside a privately owned business without their consent. No ordinary cafe customer could ever suspect that their image would end up in dataset used for surveillance research and development, but that is exactly what happened to customers at Brainwash Cafe in San Francisco.</p> <p>Although Brainwash appears to be a less popular dataset, it was notably used in 2016 and 2017 by researchers affiliated with the National University of Defense Technology in China for two <a href="https://www.semanticscholar.org/paper/Localized-region-context-and-object-feature-fusion-Li-Dou/b02d31c640b0a31fb18c4f170d841d8e21ffb66c">research</a> <a href="https://www.semanticscholar.org/paper/A-Replacement-Algorithm-of-Non-Maximum-Suppression-Zhao-Wang/591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b">projects</a> on advancing the capabilities of object detection to more accurately isolate the target region in an image. <a class="footnote_shim" name="[^localized_region_context]_1"> </a><a href="#[^localized_region_context]" class="footnote" title="Footnote 3">3</a> <a class="footnote_shim" name="[^replacement_algorithm]_1"> </a><a href="#[^replacement_algorithm]" class="footnote" title="Footnote 4">4</a> The <a href="https://en.wikipedia.org/wiki/National_University_of_Defense_Technology">National University of Defense Technology</a> is controlled by China's top military body, the Central Military Commission.</p> <p>The Brainwash dataset also appears in a 2018 research paper affiliated with Megvii (Face++) that used images from Brainwash cafe "to validate the generalization ability of [their] CrowdHuman dataset for head detection."<a class="footnote_shim" name="[^crowdhuman]_1"> </a><a href="#[^crowdhuman]" class="footnote" title="Footnote 5">5</a>. Megvii is the parent company of Face++, who has provided surveillance technology to <a href="https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html">monitor Uighur Muslims</a> in Xinjiang and may be <a href="https://www.bloomberg.com/news/articles/2019-05-22/trump-weighs-blacklisting-two-chinese-surveillance-companies">blacklisted</a> in the United States.</p> @@ -106,10 +106,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how Brainwash Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Brainwash Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. 
+ To help understand how Brainwash Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Brainwash Dataset was collected, verified, and geocoded to show how AI training data has proliferated around the world. Click on the markers to reveal research projects at that location. </p> </section> @@ -145,7 +145,12 @@ <h2>Supplementary Information</h2> -</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/brainwash_grid.jpg' alt=' Nine of 11,917 images from the the Brainwash dataset. Graphic: megapixels.cc based on Brainwash dataset by Russel et. al. License: <a href="https://opendatacommons.org/licenses/pddl/summary/index.html">Open Data Commons Public Domain Dedication</a> (PDDL)'><div class='caption'> Nine of 11,917 images from the the Brainwash dataset. Graphic: megapixels.cc based on Brainwash dataset by Russel et. al. License: <a href="https://opendatacommons.org/licenses/pddl/summary/index.html">Open Data Commons Public Domain Dedication</a> (PDDL)</div></div></section><section> +</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/brainwash_grid.jpg' alt=' Nine of 11,917 images from the the Brainwash dataset. Graphic: megapixels.cc based on Brainwash dataset by Russel et. al. License: <a href="https://opendatacommons.org/licenses/pddl/summary/index.html">Open Data Commons Public Domain Dedication</a> (PDDL)'><div class='caption'> Nine of 11,917 images from the the Brainwash dataset. Graphic: megapixels.cc based on Brainwash dataset by Russel et. al. License: <a href="https://opendatacommons.org/licenses/pddl/summary/index.html">Open Data Commons Public Domain Dedication</a> (PDDL)</div></div></section><section><h3>Press Coverage</h3> +<ul> +<li>New York Times: <a href="https://www.nytimes.com/2019/07/13/technology/">Facial Recognition Tech Is Growing Stronger, Thanks to Your Face</a></li> +<li>De Tijd: <a href="https://www.tijd.be/dossier/legrandinconnu/brainwash/10136670.html">Brainwash</a></li> +</ul> +</section><section> <h4>Cite Our Work</h4> <p> diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html index fc141450..351606cb 100644 --- a/site/public/datasets/duke_mtmc/index.html +++ b/site/public/datasets/duke_mtmc/index.html @@ -55,8 +55,8 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">Duke MTMC</span> is a dataset of surveillance camera footage of students on Duke University campus</span></div><div class='hero_subdesc'><span class='bgpad'>Duke MTMC contains over 2 million video frames and 2,700 unique identities collected from 8 HD cameras at Duke University campus in March 2014 -</span></div></div></section><section><h2>Duke MTMC</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/background.jpg)'></section><section><div class='image'><div class='intro-caption caption'>A still frame from the Duke MTMC (Multi-Target-Multi-Camera) CCTV dataset captured on Duke University campus in 2014. 
The dataset has now been terminated by the author in response to this report.</div></div></section><section><h1>Duke MTMC</h1> +<p><em>Update: In response to this report and an <a href="https://www.ft.com/content/cf19b956-60a2-11e9-b285-3acd5d43599e">investigation</a> by the Financial Times, Duke University has terminated the Duke MTMC dataset.</em></p> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2016</div> @@ -75,7 +75,8 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='http://vision.cs.duke.edu/DukeMTMC/' target='_blank' rel='nofollow noopener'>duke.edu</a></div> - </div></div><p>Duke MTMC (Multi-Target, Multi-Camera) is a dataset of surveillance video footage taken on Duke University's campus in 2014 and is used for research and development of video tracking systems, person re-identification, and low-resolution facial recognition. The dataset contains over 14 hours of synchronized surveillance video from 8 cameras at 1080p and 60 FPS, with over 2 million frames of 2,000 students walking to and from classes. The 8 surveillance cameras deployed on campus were specifically setup to capture students "during periods between lectures, when pedestrian traffic is heavy".<a class="footnote_shim" name="[^duke_mtmc_orig]_1"> </a><a href="#[^duke_mtmc_orig]" class="footnote" title="Footnote 1">1</a></p> + </div></div><p>Duke MTMC (Multi-Target, Multi-Camera) is a dataset of surveillance video footage taken on Duke University's campus in 2014 and is used for research and development of video tracking systems, person re-identification, and low-resolution facial recognition.</p> +<p>The dataset contains over 14 hours of synchronized surveillance video from 8 cameras at 1080p and 60 FPS, with over 2 million frames of 2,000 students walking to and from classes. The 8 surveillance cameras deployed on campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy".<a class="footnote_shim" name="[^duke_mtmc_orig]_1"> </a><a href="#[^duke_mtmc_orig]" class="footnote" title="Footnote 1">1</a></p> <p>For this analysis of the Duke MTMC dataset, over 100 publicly available research papers that used the dataset were analyzed to find out who's using the dataset and where it's being used. The results show that the Duke MTMC dataset has spread far beyond its origins and intentions in academic research projects at Duke University. Since its publication in 2016, more than twice as many research citations originated in China as in the United States. Among these citations were papers with links to the Chinese military and several of the companies known to provide Chinese authorities with the oppressive surveillance technology used to monitor millions of Uighur Muslims.</p> <p>In one 2018 <a href="http://openaccess.thecvf.com/content_cvpr_2018/papers/Xu_Attention-Aware_Compositional_Network_CVPR_2018_paper.pdf">paper</a> jointly published by researchers from SenseNets and SenseTime (and funded by SenseTime Group Limited) entitled <a href="https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e">Attention-Aware Compositional Network for Person Re-identification</a>, the Duke MTMC dataset was used for "extensive experiments" on improving person re-identification across multiple surveillance cameras with important applications in suspect tracking. 
Both SenseNets and SenseTime have been linked to providing surveillance technology to monitor Uighur Muslims in China. <a class="footnote_shim" name="[^xinjiang_nyt]_1"> </a><a href="#[^xinjiang_nyt]" class="footnote" title="Footnote 4">4</a><a class="footnote_shim" name="[^sensetime_qz]_1"> </a><a href="#[^sensetime_qz]" class="footnote" title="Footnote 2">2</a><a class="footnote_shim" name="[^sensenets_uyghurs]_1"> </a><a href="#[^sensenets_uyghurs]" class="footnote" title="Footnote 3">3</a></p> </section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/duke_mtmc_reid_montage.jpg' alt=' A collection of 1,600 out of the approximately 2,000 students and pedestrians in the Duke MTMC dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification, and eventually the QMUL SurvFace face recognition dataset. Open Data Commons Attribution License.'><div class='caption'> A collection of 1,600 out of the approximately 2,000 students and pedestrians in the Duke MTMC dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification, and eventually the QMUL SurvFace face recognition dataset. Open Data Commons Attribution License.</div></div></section><section><p>Despite <a href="https://www.hrw.org/news/2017/11/19/china-police-big-data-systems-violate-privacy-target-dissent">repeated</a> <a href="https://www.hrw.org/news/2018/02/26/china-big-data-fuels-crackdown-minority-region">warnings</a> by Human Rights Watch that the authoritarian surveillance used in China represents a humanitarian crisis, researchers at Duke University continued to provide open access to their dataset for anyone to use for any project. As the surveillance crisis in China grew, so did the number of citations with links to organizations complicit in the crisis. In 2018 alone there were over 90 research projects happening in China that publicly acknowledged using the Duke MTMC dataset. Amongst these were projects from CloudWalk, Hikvision, Megvii (Face++), SenseNets, SenseTime, Beihang University, China's National University of Defense Technology, and the PLA's Army Engineering University.</p> @@ -268,10 +269,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how Duke MTMC Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Duke Multi-Target, Multi-Camera Tracking Project was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how Duke MTMC Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Duke Multi-Target, Multi-Camera Tracking Project was collected, verified, and geocoded to show the information supply chains of people appearing in the images. Click on the markers to reveal research projects at that location. 
</p> </section> diff --git a/site/public/datasets/helen/index.html b/site/public/datasets/helen/index.html index 44ef462e..ffd432b9 100644 --- a/site/public/datasets/helen/index.html +++ b/site/public/datasets/helen/index.html @@ -4,7 +4,7 @@ <title>MegaPixels: HELEN</title> <meta charset="utf-8" /> <meta name="author" content="Adam Harvey" /> - <meta name="description" content="HELEN Face Dataset" /> + <meta name="description" content="HELEN is a dataset of face images from Flickr used for training facial component localization algorithms" /> <meta property="og:title" content="MegaPixels: HELEN"/> <meta property="og:type" content="website"/> <meta property="og:summary" content="MegaPixels is an art and research project about face recognition datasets created \"in the wild\"/> @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>HELEN Face Dataset</span></div><div class='hero_subdesc'><span class='bgpad'>HELEN (under development) -</span></div></div></section><section><h2>HELEN</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/background.jpg)'></section><section><div class='image'><div class='intro-caption caption'>Example images from the HELEN dataset</div></div></section><section><h1>HELEN Dataset</h1> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2012</div> @@ -69,8 +68,74 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='http://www.ifp.illinois.edu/~vuongle2/helen/' target='_blank' rel='nofollow noopener'>illinois.edu</a></div> - </div></div><p>[ page under development ]</p> -</section><section> + </div></div><p>HELEN is a dataset of annotated face images used for facial component localization. It includes 2,330 images from Flickr found by searching for "portrait" combined with terms such as "family", "wedding", "boy", "outdoor", and "studio".<a class="footnote_shim" name="[^orig_paper]_1"> </a><a href="#[^orig_paper]" class="footnote" title="Footnote 1">1</a></p> +<p>The dataset was published in 2012 with the primary motivation listed as facilitating "high quality editing of portraits". However, the paper's introduction also mentions that facial feature localization "is an essential component for face recognition, tracking and expression analysis."<a class="footnote_shim" name="[^orig_paper]_2"> </a><a href="#[^orig_paper]" class="footnote" title="Footnote 1">1</a></p> +<p>Regardless of the authors' primary motivations, the HELEN dataset has become one of the most widely used datasets for training facial landmark algorithms, which are essential parts of most facial recognition processing systems. Facial landmarks are used to isolate facial features such as the eyes, nose, jawline, and mouth in order to align faces to match a templated pose.</p> +</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/montage_lms_21_14_14_14_26.png' alt=' An example annotation from the HELEN dataset showing 194 points that were originally annotated by Mechanical Turk workers. 
Graphic © 2019 MegaPixels.cc based on data from HELEN dataset by Le, Vuong et al.'><div class='caption'> An example annotation from the HELEN dataset showing 194 points that were originally annotated by Mechanical Turk workers. Graphic © 2019 MegaPixels.cc based on data from HELEN dataset by Le, Vuong et al.</div></div></section><section><p>This analysis shows that since its initial publication in 2012, the HELEN dataset has been used in over 200 research projects related to facial recognition, with the vast majority of research taking place in China.</p>
+<p>Commercial use includes IBM, NVIDIA, NEC, Microsoft Research Asia, Google, Megvii, Microsoft, Intel, Daimler, Tencent, Baidu, Adobe, and Facebook.</p>
+<p>Military and defense usage includes NUDT (National University of Defense Technology).</p>
+<p><a href="http://eccv2012.unifi.it/">http://eccv2012.unifi.it/</a></p>
+<p>TODO</p>
+<ul>
+<li>add proof of use in dlib and openface</li>
+<li>add proof of use in commercial use of dlib? ibm dif</li>
+<li>make landmark over blurred images</li>
+<li>add 6x6 grid for landmarks</li>
+<li>highlight key findings</li>
+<li>highlight key commercial usage</li>
+<li>look for most interesting research papers to provide example of how it's used for face recognition</li>
+<li>estimated time: 6 hours</li>
+<li>add data to github repo?</li>
+</ul>
+<table>
+<thead><tr>
+<th>Organization</th>
+<th>Paper</th>
+<th>Year</th>
+<th>Used Duke MTMC</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>SenseTime, Amazon</td>
+<td><a href="https://arxiv.org/pdf/1805.10483.pdf">Look at Boundary: A Boundary-Aware Face Alignment Algorithm</a></td>
+<td>2018</td>
+<td>✔</td>
+</tr>
+<tr>
+<td>SenseTime</td>
+<td><a href="https://arxiv.org/pdf/1807.11079.pdf">ReenactGAN: Learning to Reenact Faces via Boundary Transfer</a></td>
+<td>2018</td>
+<td>✔</td>
+</tr>
+</tbody>
+</table>
+<p>The dataset was used for training the OpenFace software: "we used the HELEN and LFPW training subsets for training and the rest for testing" <a href="https://github.com/TadasBaltrusaitis/OpenFace/wiki/Datasets">https://github.com/TadasBaltrusaitis/OpenFace/wiki/Datasets</a></p>
+<p>The popular dlib facial landmark detector was trained using HELEN; see the usage sketch at the end of this section.</p>
+<p>In addition to the 200+ verified citations, the HELEN dataset was used in projects including:</p>
+<ul>
+<li><a href="https://github.com/memoiry/face-alignment">https://github.com/memoiry/face-alignment</a></li>
+<li><a href="http://www.dsp.toronto.edu/projects/face_analysis/">http://www.dsp.toronto.edu/projects/face_analysis/</a></li>
+</ul>
+<p>It has also been converted into new datasets, including:</p>
+<ul>
+<li><a href="https://github.com/JPlin/Relabeled-HELEN-Dataset">https://github.com/JPlin/Relabeled-HELEN-Dataset</a></li>
+<li><a href="https://www.kaggle.com/kmader/helen-eye-dataset">https://www.kaggle.com/kmader/helen-eye-dataset</a></li>
+</ul>
+<p>The original site:</p>
+<ul>
+<li><a href="http://www.ifp.illinois.edu/~vuongle2/helen/">http://www.ifp.illinois.edu/~vuongle2/helen/</a></li>
+</ul>
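+<p>For context, the sketch below is a rough illustration of how a landmark predictor trained on data such as HELEN is typically applied: detect a face, locate its landmark points, and warp the face to a canonical pose before recognition. It is a minimal example using dlib's Python API with its publicly distributed 68-point model (trained on the iBUG 300-W collection, which draws on HELEN images); the image filename is a placeholder, not part of the dataset.</p>
+<pre>
+# Minimal sketch: locate facial landmarks and align a face with dlib.
+# Assumes shape_predictor_68_face_landmarks.dat has been downloaded.
+import dlib
+
+detector = dlib.get_frontal_face_detector()
+predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
+
+img = dlib.load_rgb_image("portrait.jpg")   # placeholder input image
+for det in detector(img, 1):                # detected face bounding boxes
+    shape = predictor(img, det)             # 68 landmark points
+    points = [(p.x, p.y) for p in shape.parts()]
+    # Landmarks around the eyes, nose, jawline, and mouth can then be used
+    # to warp the face to a templated pose prior to recognition.
+    aligned = dlib.get_face_chip(img, shape, size=150)
+</pre>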
+<h3>Example Images</h3>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/feature_outdoor_02.jpg' alt=' An image from the HELEN dataset "wedding" category used for training face recognition 2839127417_1.jpg for outdoor studio'><div class='caption'> An image from the HELEN dataset "wedding" category used for training face recognition 2839127417_1.jpg for outdoor studio</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/feature_graduation.jpg' alt=' An image from the HELEN dataset "wedding" category used for training face recognition 2325274893_1 '><div class='caption'> An image from the HELEN dataset "wedding" category used for training face recognition 2325274893_1 </div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/feature_wedding.jpg' alt=' An image from the HELEN dataset "wedding" category used for training face recognition 2325274893_1 '><div class='caption'> An image from the HELEN dataset "wedding" category used for training face recognition 2325274893_1 </div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/feature_wedding_02.jpg' alt=' An image from the HELEN dataset "wedding" category used for training face recognition 2325274893_1 '><div class='caption'> An image from the HELEN dataset "wedding" category used for training face recognition 2325274893_1 </div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/feature_family.jpg' alt=' Original Flickr image used in HELEN facial analysis and recognition dataset for the keyword "family". 296814969'><div class='caption'> Original Flickr image used in HELEN facial analysis and recognition dataset for the keyword "family". 296814969</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/feature_family_05.jpg' alt=' Original Flickr image used in HELEN facial analysis and recognition dataset for the keyword "family". 296814969'><div class='caption'> Original Flickr image used in HELEN facial analysis and recognition dataset for the keyword "family". 296814969</div></div></section><section> <h3>Who used Helen Dataset?</h3> <p> @@ -91,10 +156,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how Helen Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Helen Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how Helen Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Helen Dataset was collected, verified, and geocoded to show how AI training data has proliferated around the world. Click on the markers to reveal research projects at that location. </p> </section> @@ -130,7 +195,10 @@ <h2>Supplementary Information</h2> +</section><section><h3>Age and Gender Distribution</h3> </section><section> + <p>Age and gender distributions were estimated by analyzing all faces in the dataset images. This may include additional faces appearing next to an annotated face, or it may skip faces that were erroneously included in the original dataset. These numbers are provided as an estimate and not a factual representation of the exact gender and age of all faces.</p>
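+<p>As a rough illustration only, the following sketch shows how per-face age and gender estimates could be tallied into the age.csv and gender.csv files that the charts below read. The estimator function, folder name, and CSV column names are placeholders and do not describe the actual pipeline used for this analysis.</p>
+<pre>
+# Sketch: aggregate per-face age/gender estimates into CSV files
+# consumed by the pie chart applets. The estimator is a placeholder.
+import csv
+from collections import Counter
+from pathlib import Path
+
+def estimate_faces(image_path):
+    """Placeholder: return a list of (age_bracket, gender) tuples,
+    one per face detected in the image."""
+    raise NotImplementedError("plug in a face attribute model here")
+
+age_counts, gender_counts = Counter(), Counter()
+for image_path in Path("helen_images").glob("*.jpg"):  # placeholder folder
+    for age_bracket, gender in estimate_faces(image_path):
+        age_counts[age_bracket] += 1
+        gender_counts[gender] += 1
+
+for filename, counts in (("age.csv", age_counts), ("gender.csv", gender_counts)):
+    with open(filename, "w", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(["label", "count"])  # assumed column names
+        writer.writerows(counts.most_common())
+</pre>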
+</section><section><div class='columns columns-2'><section class='applet_container'><div class='applet' data-payload='{"command": "single_pie_chart /datasets/helen/assets/age.csv", "fields": ["Caption: HELEN dataset age distribution", "Top: 10", "OtherLabel: Other"]}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "single_pie_chart /datasets/helen/assets/gender.csv", "fields": ["Caption: HELEN dataset gender distribution", "Top: 10", "OtherLabel: Other"]}'></div></section></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/montage_lms_21_15_15_7_26_0.png' alt=' Visualization of the HELEN dataset 194-point facial landmark annotations. Credit: graphic © MegaPixels.cc 2019, data from HELEN dataset by Zhou, Brand, Lin 2013. If you use this image please credit both the graphic and data source.'><div class='caption'> Visualization of the HELEN dataset 194-point facial landmark annotations. Credit: graphic © MegaPixels.cc 2019, data from HELEN dataset by Zhou, Brand, Lin 2013. If you use this image please credit both the graphic and data source.</div></div></section><section> <h4>Cite Our Work</h4> <p> @@ -147,7 +215,17 @@ }</pre> </p> -</section> +</section><section><h4>Cite the Original Authors' Work</h4>
+<p>If you find the HELEN dataset useful or reference it in your work, please cite the authors' original work as:</p>
+<pre>
+@inproceedings{Le2012InteractiveFF,
+  title={Interactive Facial Feature Localization},
+  author={Vuong Le and Jonathan Brandt and Zhe L. Lin and Lubomir D. Bourdev and Thomas S. Huang},
+  booktitle={ECCV},
+  year={2012}
+}
+</pre></section><section><h3>References</h3><section><ul class="footnotes"><li>1 <a name="[^orig_paper]" class="footnote_shim"></a><span class="backlinks"><a href="#[^orig_paper]_1">a</a><a href="#[^orig_paper]_2">b</a></span>Le, Vuong et al. “Interactive Facial Feature Localization.” ECCV (2012). 
+</li></ul></section></section> </div> <footer> diff --git a/site/public/datasets/ibm_dif/index.html b/site/public/datasets/ibm_dif/index.html index be5dbfe4..f9e7a91d 100644 --- a/site/public/datasets/ibm_dif/index.html +++ b/site/public/datasets/ibm_dif/index.html @@ -1,11 +1,11 @@ <!doctype html> <html> <head> - <title>MegaPixels: MegaFace</title> + <title>MegaPixels: IBM DiF</title> <meta charset="utf-8" /> <meta name="author" content="Adam Harvey" /> - <meta name="description" content="MegaFace Dataset" /> - <meta property="og:title" content="MegaPixels: MegaFace"/> + <meta name="description" content="Diversity in Faces Dataset" /> + <meta property="og:title" content="MegaPixels: IBM DiF"/> <meta property="og:type" content="website"/> <meta property="og:summary" content="MegaPixels is an art and research project about face recognition datasets created \"in the wild\"/> <meta property="og:image" content="https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/ibm_dif/assets/background.jpg" /> @@ -45,7 +45,7 @@ <a class='slogan' href="/"> <div class='logo'></div> <div class='site_name'>MegaPixels</div> - <div class='page_name'>MegaFace Dataset</div> + <div class='page_name'>IBM Diversity in Faces</div> </a> <div class='links'> <a href="/datasets/">Datasets</a> @@ -55,26 +55,19 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/ibm_dif/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>MegaFace Dataset</span></div><div class='hero_subdesc'><span class='bgpad'>MegaFace contains 670K identities and 4.7M images -</span></div></div></section><section><h2>MegaFace</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/ibm_dif/assets/background.jpg)'></section><section><h2>IBM Diversity in Faces</h2> </section><section><div class='right-sidebar'><div class='meta'> - <div class='gray'>Published</div> - <div>2016</div> - </div><div class='meta'> <div class='gray'>Images</div> - <div>4,753,520 </div> - </div><div class='meta'> - <div class='gray'>Identities</div> - <div>672,057 </div> + <div>1,070,000 </div> </div><div class='meta'> <div class='gray'>Purpose</div> - <div>face recognition</div> + <div>Face recognition and cranio-facial analysis</div> </div><div class='meta'> <div class='gray'>Website</div> - <div><a href='http://megaface.cs.washington.edu/' target='_blank' rel='nofollow noopener'>washington.edu</a></div> + <div><a href='https://www.research.ibm.com/artificial-intelligence/trusted-ai/diversity-in-faces/' target='_blank' rel='nofollow noopener'>ibm.com</a></div> </div></div><p>[ page under development ]</p> </section><section> - <h3>Who used MegaFace Dataset?</h3> + <h3>Who used IBM Diversity in Faces?</h3> <p> This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. @@ -94,10 +87,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how MegaFace Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing MegaFace Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. 
+ To help understand how IBM Diversity in Faces has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Diversity in Faces Dataset was collected, verified, and geocoded to show the information supply chains of people appearing in the images. Click on the markers to reveal research projects at that location. </p> </section> diff --git a/site/public/datasets/ijb_c/index.html b/site/public/datasets/ijb_c/index.html index abe7d5ed..1795ccbd 100644 --- a/site/public/datasets/ijb_c/index.html +++ b/site/public/datasets/ijb_c/index.html @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/ijb_c/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>IARPA Janus Benchmark C is a dataset of web images used</span></div><div class='hero_subdesc'><span class='bgpad'>The IJB-C dataset contains 21,294 images and 11,779 videos of 3,531 identities -</span></div></div></section><section><h2>IARPA Janus Benchmark C (IJB-C)</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/ijb_c/assets/background.jpg)'></section><section><h2>IARPA Janus Benchmark C (IJB-C)</h2> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2017</div> @@ -147,10 +146,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how IJB-C has been used around the world by commercial, military, and academic organizations; existing publicly available research citing IARPA Janus Benchmark C was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how IJB-C has been used around the world by commercial, military, and academic organizations; existing publicly available research citing IARPA Janus Benchmark C was collected, verified, and geocoded to show the information supply chains of people appearing in the images. Click on the markers to reveal research projects at that location. 
</p> </section> diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html index d38feb2e..c17caeb0 100644 --- a/site/public/datasets/index.html +++ b/site/public/datasets/index.html @@ -53,7 +53,7 @@ <a href="/research">Research</a> </div> </header> - <div class="content content-"> + <div class="content content-dataset-list"> <div class='dataset-heading'> @@ -97,6 +97,34 @@ </div> </a> + <a href="/datasets/helen/"> + <div class="dataset-image" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/helen/assets/index.jpg)"></div> + <div class="dataset"> + <span class='title'>HELEN</span> + <div class='fields'> + <div class='year visible'><span>2012</span></div> + <div class='purpose'><span>facial feature localization algorithm</span></div> + + <div class='images'><span>2,330 images</span></div> + + </div> + </div> + </a> + + <a href="/datasets/megaface/"> + <div class="dataset-image" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/megaface/assets/index.jpg)"></div> + <div class="dataset"> + <span class='title'>MegaFace</span> + <div class='fields'> + <div class='year visible'><span>2016</span></div> + <div class='purpose'><span>face recognition</span></div> + + <div class='images'><span>4,753,520 images</span></div> + + </div> + </div> + </a> + <a href="/datasets/msceleb/"> <div class="dataset-image" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/msceleb/assets/index.jpg)"></div> <div class="dataset"> diff --git a/site/public/datasets/lfpw/index.html b/site/public/datasets/lfpw/index.html index f2ddc636..7cee2116 100644 --- a/site/public/datasets/lfpw/index.html +++ b/site/public/datasets/lfpw/index.html @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfpw/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Labeled Face Parts in the Wild Dataset</span></div><div class='hero_subdesc'><span class='bgpad'>Labeled Face Parts in the Wild ... -</span></div></div></section><section><h2>Labeled Face Parts in the Wild</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfpw/assets/background.jpg)'></section><section><h2>Labeled Face Parts in the Wild</h2> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2011</div> @@ -69,7 +68,13 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='http://neerajkumar.org/databases/lfpw/' target='_blank' rel='nofollow noopener'>neerajkumar.org</a></div> - </div></div><p>[ page under development ]</p> + </div></div><p>RESEARCH below this line</p> +<blockquote><p>Release 1 of LFPW consists of 1,432 faces from images downloaded from the web using simple text queries on sites such as google.com, flickr.com, and yahoo.com. Each image was labeled by three MTurk workers, and 29 fiducial points, shown below, are included in dataset. LFPW was originally described in the following publication:</p> +<p>Due to copyright issues, we cannot distribute image files in any format to anyone. Instead, we have made available a list of image URLs where you can download the images yourself. We realize that this makes it impossible to exactly compare numbers, as image links will slowly disappear over time, but we have no other option. 
This seems to be the way other large web-based databases seem to be evolving.</p> +</blockquote> +<p><a href="https://neerajkumar.org/databases/lfpw/">https://neerajkumar.org/databases/lfpw/</a></p> +<blockquote><p>This research was performed at Kriegman-Belhumeur Vision Technologies and was funded by the CIA through the Office of the Chief Scientist. <a href="https://www.cs.cmu.edu/~peiyunh/topdown/">https://www.cs.cmu.edu/~peiyunh/topdown/</a> (nk_cvpr2011_faceparts.pdf)</p> +</blockquote> </section><section> <h3>Who used LFPW?</h3> @@ -91,10 +96,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how LFPW has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Labeled Face Parts in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how LFPW has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Labeled Face Parts in the Wild was collected, verified, and geocoded to show the information supply chains of people appearing in the images. Click on the markers to reveal research projects at that location. </p> </section> diff --git a/site/public/datasets/megaface/index.html b/site/public/datasets/megaface/index.html index 712af28a..78f6a0cc 100644 --- a/site/public/datasets/megaface/index.html +++ b/site/public/datasets/megaface/index.html @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/megaface/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>MegaFace Dataset</span></div><div class='hero_subdesc'><span class='bgpad'>MegaFace contains 670K identities and 4.7M images -</span></div></div></section><section><h2>MegaFace</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/megaface/assets/background.jpg)'></section><section><div class='image'><div class='intro-caption caption'>Example images from the MegaFace dataset</div></div></section><section><h2>MegaFace</h2> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2016</div> @@ -72,7 +71,7 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='http://megaface.cs.washington.edu/' target='_blank' rel='nofollow noopener'>washington.edu</a></div> - </div></div><p>[ page under development ]</p> + </div></div><p>MegaFace is a dataset...</p> </section><section> <h3>Who used MegaFace Dataset?</h3> @@ -94,10 +93,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how MegaFace Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing MegaFace Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. 
+ To help understand how MegaFace Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing MegaFace Dataset was collected, verified, and geocoded to show how AI training data has proliferated around the world. Click on the markers to reveal research projects at that location. </p> </section> @@ -133,6 +132,9 @@ <h2>Supplementary Information</h2> +</section><section><h3>Age and Gender Distribution</h3> +</section><section><div class='columns columns-2'><section class='applet_container'><div class='applet' data-payload='{"command": "single_pie_chart /datasets/megaface/assets/age.csv", "fields": ["Caption: MegaFace dataset age distribution", "Top: 10", "OtherLabel: Other"]}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "single_pie_chart /datasets/megaface/assets/gender.csv", "fields": ["Caption: MegaFace dataset gender distribution", "Top: 10", "OtherLabel: Other"]}'></div></section></div></section><section> + <p>Age and gender distributions were estimated by analyzing all faces in the dataset images. This may include additional faces appearing next to an annotated face, or it may skip faces that were erroneously included in the original dataset. These numbers are provided as an estimate and not a factual representation of the exact gender and age of all faces.</p> </section><section> <h4>Cite Our Work</h4> diff --git a/site/public/datasets/msceleb/index.html b/site/public/datasets/msceleb/index.html index 42a44571..f0da450f 100644 --- a/site/public/datasets/msceleb/index.html +++ b/site/public/datasets/msceleb/index.html @@ -55,8 +55,8 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/msceleb/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>MS Celeb is a dataset of 10 million face images harvested from the Internet</span></div><div class='hero_subdesc'><span class='bgpad'>The MS Celeb dataset includes 10 million images of 100,000 people and an additional target list of 1,000,000 individuals -</span></div></div></section><section><h2>Microsoft Celeb Dataset (MS Celeb)</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/msceleb/assets/background.jpg)'></section><section><div class='image'><div class='intro-caption caption'>Example images from the MS-Celeb-1M dataset</div></div></section><section><h1>Microsoft Celeb Dataset (MS Celeb)</h1> +<p><em>Update: In response to this report and an <a href="https://www.ft.com/content/cf19b956-60a2-11e9-b285-3acd5d43599e">investigation</a> by the Financial Times, Microsoft has terminated its MS-Celeb website <a href="https://msceleb.org">https://msceleb.org</a>.</em></p> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2016</div> @@ -78,7 +78,8 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='http://www.msceleb.org/' target='_blank' rel='nofollow noopener'>msceleb.org</a></div> - </div></div><p>Microsoft Celeb (MS-Celeb-1M) is a dataset of 10 million face images harvested from the Internet for the purpose of developing face recognition technologies. 
According to Microsoft Research, who created and published the <a href="https://www.microsoft.com/en-us/research/publication/ms-celeb-1m-dataset-benchmark-large-scale-face-recognition-2/">dataset</a> in 2016, MS Celeb is the largest publicly available face recognition dataset in the world, containing over 10 million images of nearly 100,000 individuals. Microsoft's goal in building this dataset was to distribute an initial training dataset of 100,000 individuals' biometric data to accelerate research into recognizing a larger target list of one million people "using all the possibly collected face images of this individual on the web as training data".<a class="footnote_shim" name="[^msceleb_orig]_1"> </a><a href="#[^msceleb_orig]" class="footnote" title="Footnote 1">1</a></p> + </div><div class='meta'><div class='gray'>Press coverage</div><div><a href="https://www.ft.com/content/cf19b956-60a2-11e9-b285-3acd5d43599e">Financial Times</a>, <a href="https://www.nytimes.com/2019/07/13/technology/databases-faces-facial-recognition-technology.html">New York Times</a>, <a href="https://www.bbc.com/news/technology-48555149">BBC</a>, <a href="https://www.spiegel.de/netzwelt/web/microsoft-gesichtserkennung-datenbank-mit-zehn-millionen-fotos-geloescht-a-1271221.html">Spiegel</a>, <a href="https://www.lesechos.fr/tech-medias/intelligence-artificielle/le-mariage-explosif-de-nos-donnees-et-de-lia-1031813">Les Echos</a>, <a href="https://www.lastampa.it/2019/06/22/tecnologia/microsoft-ha-cancellato-il-suo-database-per-il-riconoscimento-facciale-PWwLGmpO1fKQdykMZVBd9H/pagina.html">La Stampa</a></div></div></div><p>Microsoft Celeb (MS-Celeb-1M) is a dataset of 10 million face images harvested from the Internet for the purpose of developing face recognition technologies.</p> +<p>According to Microsoft Research, who created and published the <a href="https://www.microsoft.com/en-us/research/publication/ms-celeb-1m-dataset-benchmark-large-scale-face-recognition-2/">dataset</a> in 2016, MS Celeb is the largest publicly available face recognition dataset in the world, containing over 10 million images of nearly 100,000 individuals. Microsoft's goal in building this dataset was to distribute an initial training dataset of 100,000 individuals' biometric data to accelerate research into recognizing a larger target list of one million people "using all the possibly collected face images of this individual on the web as training data".<a class="footnote_shim" name="[^msceleb_orig]_1"> </a><a href="#[^msceleb_orig]" class="footnote" title="Footnote 1">1</a></p> <p>While the majority of people in this dataset are American and British actors, the exploitative use of the term "celebrity" extends far beyond Hollywood. Many of the names in the MS Celeb face recognition dataset are merely people who must maintain an online presence for their professional lives: journalists, artists, musicians, activists, policy makers, writers, and academics. Many people in the target list are even vocal critics of the very technology Microsoft is using their name and biometric information to build. 
It includes digital rights activists like Jillian York; artists critical of surveillance including Trevor Paglen, Jill Magid, and Aram Bartholl; Intercept founders Laura Poitras, Jeremy Scahill, and Glenn Greenwald; Data and Society founder danah boyd; Shoshana Zuboff, author of <em>Surveillance Capitalism</em>; and even Julie Brill, the former FTC commissioner responsible for protecting consumer privacy.</p> <h3>Microsoft's 1 Million Target List</h3> <p>Microsoft Research distributed two main digital assets: a dataset of approximately 10,000,000 images of 100,000 individuals and a target list of exactly 1 million names. The 900,000 names distributed without images make up the remainder of the target list, which is used to gather more images for each subject.</p> @@ -219,6 +220,8 @@ <p>In 2017 Microsoft Research organized a face recognition competition at the International Conference on Computer Vision (ICCV), one of the top two computer vision conferences worldwide, where industry and academia used the MS Celeb dataset to compete for the highest performance scores. The 2017 winner was Beijing-based OrionStar Technology Co., Ltd. In their <a href="https://www.prnewswire.com/news-releases/orionstar-wins-challenge-to-recognize-one-million-celebrity-faces-with-artificial-intelligence-300494265.html">press release</a>, OrionStar boasted a 13% increase on the difficult set over last year's winner. The prior year's competitors included Beijing-based Faceall Technology Co., Ltd., a company providing face recognition for "smart city" applications.</p> <p>Considering the multiple citations from commercial organizations (Canon, Hitachi, IBM, Megvii/Face++, Microsoft, Microsoft Asia, SenseTime, OrionStar, Faceall), military use (National University of Defense Technology in China), the proliferation of subset data (Racial Faces in the Wild), and the real-time visible proliferation via Academic Torrents, it's fairly clear that Microsoft has lost control of their MS Celeb dataset and the biometric data of nearly 100,000 individuals.</p> <p>To provide insight into where these 10 million face images have traveled, over 100 research papers have been verified and geolocated to show who used the dataset and where they used it.</p> +<h2>GDPR and MS-Celeb</h2> +<p>[ in progress ]</p> </section><section> <h3>Who used Microsoft Celeb?</h3> <p> @@ -240,10 +243,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how Microsoft Celeb has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Microsoft Celebrity Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how Microsoft Celeb has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Microsoft Celebrity Dataset was collected, verified, and geocoded to show how AI training data has proliferated around the world. Click on the markers to reveal research projects at that location. 
</p> </section> @@ -279,11 +282,19 @@ <h2>Supplementary Information</h2> -</section><section><h5>FAQs and Fact Check</h5> +</section><section><h3>Age and Gender Distribution</h3> +</section><section><div class='columns columns-2'><section class='applet_container'><div class='applet' data-payload='{"command": "single_pie_chart /datasets/msceleb/assets/age.csv", "fields": ["Caption: MS-Celeb dataset age distribution", "Top: 10", "OtherLabel: Other"]}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "single_pie_chart /datasets/msceleb/assets/gender.csv", "fields": ["Caption: MS-Celeb dataset gender distribution", "Top: 10", "OtherLabel: Other"]}'></div></section></div></section><section><h5>FAQs and Fact Check</h5> <ul> -<li><strong>The MS Celeb images were not derived from Creative Commons sources</strong>. They were obtained by "retriev[ing] approximately 100 images per celebrity from popular search engines"<a class="footnote_shim" name="[^msceleb_orig]_2"> </a><a href="#[^msceleb_orig]" class="footnote" title="Footnote 1">1</a>. The dataset actually includes many copyrighted images. Microsoft doesn't provide any image URLs, but manually reviewing a small portion of images from the dataset shows many images with watermarked "Copyright" text over the image. TinEye could be used to more accurately determine the image origins in aggregate</li> -<li><strong>Microsoft did not distribute images of all one million people.</strong> They distributed images for about 100,000 and then encouraged other researchers to download the remaining 900,000 people "by using all the possibly collected face images of this individual on the web as training data."<a class="footnote_shim" name="[^msceleb_orig]_3"> </a><a href="#[^msceleb_orig]" class="footnote" title="Footnote 1">1</a></li> -<li><strong>Microsoft had not deleted or stopped distribution of their MS Celeb at the time of most press reports on June 4.</strong> Until at least June 6, 2019 the Microsoft Research data portal provided the MS Celeb dataset for download: <a href="http://web.archive.org/web/20190606150005/https://msropendata.com/datasets/98fdfc70-85ee-5288-a69f-d859bbe9c737">http://web.archive.org/web/20190606150005/https://msropendata.com/datasets/98fdfc70-85ee-5288-a69f-d859bbe9c737</a></li> +<li><strong>Despite several erroneous reports stating that the MS-Celeb images were derived from Creative Commons licensed media, the MS Celeb images were obtained from web search engines</strong>. The authors mention that they were obtained by "retriev[ing] approximately 100 images per celebrity from popular search engines"<a class="footnote_shim" name="[^msceleb_orig]_2"> </a><a href="#[^msceleb_orig]" class="footnote" title="Footnote 1">1</a>. Many, if not the vast majority, are copyrighted images. Microsoft doesn't provide image URLs, but manually reviewing a small portion of images from the dataset shows images with watermarked "Copyright" text over the image and sources including stock photo agencies such as Getty. TinEye could be used to more accurately determine the image origins in aggregate.</li> +<li><strong>Most reports incorrectly reported that Microsoft distributed images of all one million people. 
As this analysis mentions several times, Microsoft distributed images for 100,000 people and a separate target list of 900,000 more names.</strong> Other researchers were then expected and encouraged to download the remaining 900,000 people "by using all the possibly collected face images of this individual on the web as training data."<a class="footnote_shim" name="[^msceleb_orig]_3"> </a><a href="#[^msceleb_orig]" class="footnote" title="Footnote 1">1</a></li> +<li><strong>Microsoft claimed that they had deleted or stopped distribution of their MS Celeb dataset in April 2019 after the Financial Times investigation. This is false.</strong> Until at least June 6, 2019, the Microsoft Research data portal freely provided the full MS Celeb dataset for download: <a href="http://web.archive.org/web/20190606150005/https://msropendata.com/datasets/98fdfc70-85ee-5288-a69f-d859bbe9c737">http://web.archive.org/web/20190606150005/https://msropendata.com/datasets/98fdfc70-85ee-5288-a69f-d859bbe9c737</a></li> +</ul> +<h3>Press Coverage</h3> +<ul> +<li>Financial Times (original story): <a href="https://www.ft.com/content/cf19b956-60a2-11e9-b285-3acd5d43599e">Who’s using your face? The ugly truth about facial recognition</a></li> +<li>New York Times (front page story): <a href="https://www.nytimes.com/2019/07/13/technology/databases-faces-facial-recognition-technology.html">Facial Recognition Tech Is Growing Stronger, Thanks to Your Face</a></li> +<li>BBC: <a href="https://www.bbc.com/news/technology-48555149">Microsoft deletes massive face recognition database</a></li> +<li>Spiegel: <a href="https://www.spiegel.de/netzwelt/web/microsoft-gesichtserkennung-datenbank-mit-zehn-millionen-fotos-geloescht-a-1271221.html">Microsoft löscht Datenbank mit zehn Millionen Fotos</a></li> </ul> </section><section><h3>References</h3><section><ul class="footnotes"><li>1 <a name="[^msceleb_orig]" class="footnote_shim"></a><span class="backlinks"><a href="#[^msceleb_orig]_1">a</a><a href="#[^msceleb_orig]_2">b</a><a href="#[^msceleb_orig]_3">c</a></span>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition. Accessed April 18, 2019. <a href="http://web.archive.org/web/20190418151913/http://msceleb.org/">http://web.archive.org/web/20190418151913/http://msceleb.org/</a> </li><li>2 <a name="[^madhu_ft]" class="footnote_shim"></a><span class="backlinks"><a href="#[^madhu_ft]_1">a</a></span>Murgia, Madhumita. Microsoft worked with Chinese military university on artificial intelligence. Financial Times. April 10, 2019. 
diff --git a/site/public/datasets/oxford_town_centre/index.html b/site/public/datasets/oxford_town_centre/index.html index 11fb436f..24a5623d 100644 --- a/site/public/datasets/oxford_town_centre/index.html +++ b/site/public/datasets/oxford_town_centre/index.html @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/oxford_town_centre/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Oxford Town Centre is a dataset of surveillance camera footage from Cornmarket St Oxford, England</span></div><div class='hero_subdesc'><span class='bgpad'>The Oxford Town Centre dataset includes approximately 2,200 identities and is used for research and development of face recognition systems -</span></div></div></section><section><h2>Oxford Town Centre</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/oxford_town_centre/assets/background.jpg)'></section><section><div class='image'><div class='intro-caption caption'>A still frame from the Oxford Town Centre CCTV video-dataset</div></div></section><section><h1>Oxford Town Centre</h1> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2009</div> @@ -78,7 +77,8 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='http://www.robots.ox.ac.uk/ActiveVision/Research/Projects/2009bbenfold_headpose/project.html' target='_blank' rel='nofollow noopener'>ox.ac.uk</a></div> - </div></div><p>The Oxford Town Centre dataset is a CCTV video of pedestrians in a busy downtown area in Oxford used for research and development of activity and face recognition systems.<a class="footnote_shim" name="[^ben_benfold_orig]_1"> </a><a href="#[^ben_benfold_orig]" class="footnote" title="Footnote 1">1</a> The CCTV video was obtained from a surveillance camera at the corner of Cornmarket and Market St. in Oxford, England and includes approximately 2,200 people. Since its publication in 2009<a class="footnote_shim" name="[^guiding_surveillance]_1"> </a><a href="#[^guiding_surveillance]" class="footnote" title="Footnote 2">2</a> the <a href="http://www.robots.ox.ac.uk/ActiveVision/Research/Projects/2009bbenfold_headpose/project.html">Oxford Town Centre dataset</a> has been used in over 80 verified research projects including commercial research by Amazon, Disney, OSRAM, and Huawei; and academic research in China, Israel, Russia, Singapore, the US, and Germany among dozens more.</p> + </div></div><p>The Oxford Town Centre dataset is a CCTV video of pedestrians in a busy downtown area in Oxford used for research and development of activity and face recognition systems.<a class="footnote_shim" name="[^ben_benfold_orig]_1"> </a><a href="#[^ben_benfold_orig]" class="footnote" title="Footnote 1">1</a></p> +<p>The CCTV video was obtained from a surveillance camera at the corner of Cornmarket and Market St. in Oxford, England and includes approximately 2,200 people. 
Since its publication in 2009<a class="footnote_shim" name="[^guiding_surveillance]_1"> </a><a href="#[^guiding_surveillance]" class="footnote" title="Footnote 2">2</a> the <a href="http://www.robots.ox.ac.uk/ActiveVision/Research/Projects/2009bbenfold_headpose/project.html">Oxford Town Centre dataset</a> has been used in over 80 verified research projects, including commercial research by Amazon, Disney, OSRAM, and Huawei; and academic research in China, Israel, Russia, Singapore, the US, and Germany, among dozens more.</p> <p>The Oxford Town Centre dataset is unique in that it uses footage from a public surveillance camera that would otherwise be designated for public safety. The video shows that the pedestrians act normally and unrehearsed, indicating that they neither knew of nor consented to participation in the research project.</p> </section><section> <h3>Who used TownCentre?</h3> <p> @@ -101,10 +101,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how TownCentre has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Oxford Town Centre was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how TownCentre has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Oxford Town Centre was collected, verified, and geocoded to show how AI training data has proliferated around the world. Click on the markers to reveal research projects at that location. </p> </section> diff --git a/site/public/datasets/pipa/index.html b/site/public/datasets/pipa/index.html index 95b288fb..dfc54654 100644 --- a/site/public/datasets/pipa/index.html +++ b/site/public/datasets/pipa/index.html @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/pipa/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>PIPA ...</span></div><div class='hero_subdesc'><span class='bgpad'>PIPA ... -</span></div></div></section><section><h2>MegaFace</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/pipa/assets/background.jpg)'></section><section><h2>PIPA: People in Photo Albums</h2> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Published</div> <div>2015</div> @@ -97,10 +96,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how PIPA Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing People in Photo Albums Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. 
</p> </section> diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html index 2dcf88a1..c7816b1f 100644 --- a/site/public/datasets/uccs/index.html +++ b/site/public/datasets/uccs/index.html @@ -55,8 +55,8 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">UnConstrained College Students</span> is a dataset of long-range surveillance photos of students on University of Colorado in Colorado Springs campus</span></div><div class='hero_subdesc'><span class='bgpad'>The UnConstrained College Students dataset includes 16,149 images of 1,732 students, faculty, and pedestrians and is used for developing face recognition and face detection algorithms -</span></div></div></section><section><h2>UnConstrained College Students</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/background.jpg)'></section><section><div class='image'><div class='intro-caption caption'>One of 16,149 images form the UnConstrained College Students face recognition dataset captured at University of Colorado, Colorado Springs</div></div></section><section><h1>UnConstrained College Students</h1> +<p><em>Update: In response to this report and its previous publication of metadata from UCCS dataset photos, UCCS has temporarily suspended its dataset, but plans to release a new version.</em></p> </section><section><div class='right-sidebar'><div class='meta'> <div class='gray'>Images</div> <div>16,149 </div> @@ -75,7 +75,8 @@ </div><div class='meta'> <div class='gray'>Website</div> <div><a href='http://vast.uccs.edu/Opensetface/' target='_blank' rel='nofollow noopener'>uccs.edu</a></div> - </div></div><p>UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at University of Colorado Colorado Springs developed primarily for research and development of "face detection and recognition research towards surveillance applications"<a class="footnote_shim" name="[^uccs_vast]_1"> </a><a href="#[^uccs_vast]" class="footnote" title="Footnote 1">1</a>. 
According to the authors of <a href="https://www.semanticscholar.org/paper/Unconstrained-Face-Detection-and-Open-Set-Face-G%C3%BCnther-Hu/d4f1eb008eb80595bcfdac368e23ae9754e1e745">two</a> <a href="https://www.semanticscholar.org/paper/Large-scale-unconstrained-open-set-face-database-Sapkota-Boult/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1">papers</a> associated with the dataset, over 1,700 students and pedestrians were "photographed using a long-range high-resolution surveillance camera without their knowledge".<a class="footnote_shim" name="[^funding_uccs]_1"> </a><a href="#[^funding_uccs]" class="footnote" title="Footnote 3">3</a> This analysis examines the <a href="http://vast.uccs.edu/Opensetface/">UCCS dataset</a> contents of the <a href="">dataset</a>, its funding sources, timestamp data, and information from publicly available research project citations.</p> + </div></div><p>UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at University of Colorado Colorado Springs developed primarily for research and development of "face detection and recognition research towards surveillance applications"<a class="footnote_shim" name="[^uccs_vast]_1"> </a><a href="#[^uccs_vast]" class="footnote" title="Footnote 1">1</a>.</p> +<p>According to the authors of <a href="https://www.semanticscholar.org/paper/Unconstrained-Face-Detection-and-Open-Set-Face-G%C3%BCnther-Hu/d4f1eb008eb80595bcfdac368e23ae9754e1e745">two</a> <a href="https://www.semanticscholar.org/paper/Large-scale-unconstrained-open-set-face-database-Sapkota-Boult/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1">papers</a> associated with the dataset, over 1,700 students and pedestrians were "photographed using a long-range high-resolution surveillance camera without their knowledge".<a class="footnote_shim" name="[^funding_uccs]_1"> </a><a href="#[^funding_uccs]" class="footnote" title="Footnote 3">3</a> This analysis examines the contents of the <a href="http://vast.uccs.edu/Opensetface/">UCCS dataset</a>, its funding sources, timestamp data, and information from publicly available research project citations.</p> <p>The UCCS dataset includes over 1,700 unique identities, most of which are students walking to and from class. In 2018, it was the "largest surveillance [face recognition] benchmark in the public domain."<a class="footnote_shim" name="[^surv_face_qmul]_1"> </a><a href="#[^surv_face_qmul]" class="footnote" title="Footnote 4">4</a> The photos were taken during the spring semesters of 2012 – 2013 on the West Lawn of the University of Colorado Colorado Springs campus. The photographs were timed to capture students during breaks between their scheduled classes in the morning and afternoon, Monday through Thursday. "For example, a student taking Monday-Wednesday classes at 12:30 PM will show up in the camera on almost every Monday and Wednesday."<a class="footnote_shim" name="[^sapkota_boult]_1"> </a><a href="#[^sapkota_boult]" class="footnote" title="Footnote 2">2</a></p> </section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_map_aerial.jpg' alt=' The location at University of Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. 
Image: Google Maps'><div class='caption'> The location at University of Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. Image: Google Maps</div></div></section><section><p>The long-range surveillance images in the UnConstrained College Students dataset were taken using a Canon 7D 18-megapixel digital camera fitted with a Sigma 800mm F5.6 EX APO DG HSM telephoto lens and pointed out an office window across the university's West Lawn. The students were photographed from a distance of approximately 150 meters through an office window. "The camera [was] programmed to start capturing images at specific time intervals between classes to maximize the number of faces being captured."<a class="footnote_shim" name="[^sapkota_boult]_2"> </a><a href="#[^sapkota_boult]" class="footnote" title="Footnote 2">2</a> Their setup made it impossible for students to know they were being photographed, providing the researchers with realistic surveillance images to help build face recognition systems for real world applications for defense, intelligence, and commercial partners.</p> @@ -107,10 +108,10 @@ Their setup made it impossible for students to know they were being photographed <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how UCCS has been used around the world by commercial, military, and academic organizations; existing publicly available research citing UnConstrained College Students Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how UCCS has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the UnConstrained College Students Dataset was collected, verified, and geocoded to show how AI training data has proliferated globally. Click on the markers to reveal research projects at that location. 
</p> </section> diff --git a/site/public/datasets/who_goes_there/index.html b/site/public/datasets/who_goes_there/index.html index a00fd151..b57bf469 100644 --- a/site/public/datasets/who_goes_there/index.html +++ b/site/public/datasets/who_goes_there/index.html @@ -55,8 +55,7 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/who_goes_there/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Who Goes There Dataset</span></div><div class='hero_subdesc'><span class='bgpad'>Who Goes There (page under development) -</span></div></div></section><section><h2>Who Goes There</h2> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/who_goes_there/assets/background.jpg)'></section><section><h2>Who Goes There</h2> </section><section><div class='right-sidebar'></div><p>[ page under development ]</p> </section><section> <h3>Who used Who Goes There Dataset?</h3> @@ -79,10 +78,10 @@ <section> - <h3>Information Supply chain</h3> + <h3>Information Supply Chain</h3> <p> - To help understand how Who Goes There Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing WhoGoesThere was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. + To help understand how the Who Goes There Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing WhoGoesThere was collected, verified, and geocoded to show the information supply chains of people appearing in the images. Click on the markers to reveal research projects at that location. 
</p> </section> diff --git a/site/public/research/index.html b/site/public/research/index.html index f4f90531..2fb87df3 100644 --- a/site/public/research/index.html +++ b/site/public/research/index.html @@ -60,7 +60,7 @@ <a href='/research/munich_security_conference/'><section class='wide' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/site/research/munich_security_conference/assets/background.jpg);' /> <section> <h4><span class='bgpad'>28 June 2019</span></h4> - <h2><span class='bgpad'>Analyzing Transnational Flows of Face Recognition Image Training Data</span></h2> + <h2><span class='bgpad'>Transnational Flows of Face Recognition Image Training Data</span></h2> <h3><span class='bgpad'>Where does face data originate and who's using it?</span></h3> <h4 class='readmore'><span class='bgpad'>Read more...</span></h4> </section> diff --git a/site/public/research/munich_security_conference/index.html b/site/public/research/munich_security_conference/index.html index fc44bfd8..b43df151 100644 --- a/site/public/research/munich_security_conference/index.html +++ b/site/public/research/munich_security_conference/index.html @@ -55,9 +55,8 @@ </header> <div class="content content-dataset"> - <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/site/research/munich_security_conference/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Transnational Flows of Face Recognition Image Training Data</span></div><div class='hero_subdesc'><span class='bgpad'>Where does face data originate and who's using it? -</span></div></div></section><section><p><em>A case study on publicly available facial recognition datasets for the Munich Security Conference's Transnational Security Report</em></p> -</section><section><div class='right-sidebar'><div class='meta'><div class='gray'>Images Analyzed</div><div>24,302,637</div></div><div class='meta'><div class='gray'>Datasets Analyzed</div><div>30</div></div><div class='meta'><div class='gray'>Years</div><div>2006 - 2018</div></div><div class='meta'><div class='gray'>Last Updated</div><div>July 7, 2019</div></div><div class='meta'><div class='gray'>Text and Research</div><div>Adam Harvey</div></div></div><p>National AI strategies often rely on transnational data sources to capitalize on recent advancements in deep learning and neural networks. Researchers benefiting from these transnational data flows can yield quick and significant gains across diverse sectors from health care to biometrics. 
But new challenges emerge when national AI strategies collide with national interests.</p> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/site/research/munich_security_conference/assets/background.jpg)'></section><section><p><em>A case study on publicly available facial recognition datasets for the Munich Security Conference's Transnational Security Report</em></p> +</section><section><div class='right-sidebar'><div class='meta'><div class='gray'>Images Analyzed</div><div>24,302,637</div></div><div class='meta'><div class='gray'>Datasets Analyzed</div><div>30</div></div><div class='meta'><div class='gray'>Years</div><div>2006 - 2018</div></div><div class='meta'><div class='gray'>Last Updated</div><div>July 7, 2019</div></div><div class='meta'><div class='gray'>Text and Research</div><div>Adam Harvey</div></div><div class='meta'><div class='gray'>Published in</div><div><a href="https://tsr.securityconference.de/">Transnational Security Report</a></div></div></div><p>National AI strategies often rely on transnational data sources to capitalize on recent advancements in deep learning and neural networks. Researchers benefiting from these transnational data flows can realize quick and significant gains across diverse sectors, from health care to biometrics. But new challenges emerge when national AI strategies collide with national interests.</p> <p>Our <a href="https://www.ft.com/content/cf19b956-60a2-11e9-b285-3acd5d43599e">earlier research</a> on the <a href="/datasets/msceleb">MS Celeb</a> and <a href="/datasets/duke_mtmc">Duke</a> datasets published with the Financial Times revealed that several computer vision image datasets created by US companies and universities were unexpectedly also used for research by the National University of Defense Technology in China, along with top Chinese surveillance firms including SenseTime, SenseNets, CloudWalk, Hikvision, and Megvii/Face++ which have all been linked to oppressive surveillance in the Xinjiang region of China.</p> <p>In this new research for the <a href="https://tsr.securityconference.de">Munich Security Conference's Transnational Security Report</a> we provide summary statistics about the origins and endpoints of facial recognition information supply chains. 
To make it more personal, we gathered additional data on the number of public photos from embassies that are currently being used in facial recognition training datasets.</p> <div style="display:inline;" class="columns columns-1"><div class="column"><div style="background:#202020;border-radius:6px;padding:20px;width:100%"> diff --git a/todo.md b/todo.md deleted file mode 100644 index dc7ebaad..00000000 --- a/todo.md +++ /dev/null @@ -1,130 +0,0 @@ -# TODO - -## Global - -- JL: mobile CSS - - lightbox/modal on mobile, close button not visible - - decrease font-size of intro header -- AH: change intro heads to match twitter word counts better -- AH: ensure one good graphic per dataset page for social sharing -- AH: add social share graphic for homepage -- AH: add press kit/downloads - -## Splash - -- AH: create high quality 3d heads -- JL/AH: add IJB-C names to word cloud - -## Datasets - -- JL: this paper isn't appearing in the UCCS list of verified papers but should be included https://arxiv.org/pdf/1708.02337.pdf -- AH: add dataset analysis for IJB-C, HRT Transgender, MegaFace, PIPA - -## About - -- ok - -## Flickr Analysis - -Collect Flickr IDs and metadata for: - -- adience -- flickr_faces -- geofaces -- h3d -- helen -- ibm_dif -- images_of_groups -- leeds_sports_pose -- leeds_sports_pose_extended -- lfpw -- mafa -- me_glass -- megaage -- megaface -- moments_in_time -- pipa -- stickmen_family -- tiny_faces -- ucf_crowd -- used -- voc -- who_goes_there -- yfcc_100m - - -## FT Analysis: - -- [x] Brainwash -- [x] Duke MTMC -- [x] UCCS -- [x] MSCeleb -- [ ] IJB-C (and IJB-A/B?) -- [ ] HRT Transgender -- [x] Town Centre - -## NYT Analysis: - -- [ ] Helen -- [ ] MegaFace -- [ ] PIPA - -## Verifications - -- [x] Brainwash -- [x] Duke MTMC -- [ ] Helen -- [x] UCCS -- [ ] MegaFace -- [x] MSCeleb -- [ ] PIPA -- [x] IJB-C (and IJB-A/B?) -- [x] HRT Transgender -- [x] Town Centre - - ------------ - -## Datasets for next launch: - -April 4th launch - -- AFAD -- 50 People One Question -- AFLW -- AFW -- Brainwash -- CASIA Webface -- CAVIAR -- CelebA -- COFW -- DukeMTC -- Face Tracer -- Helen -- HRT Transgender -- iLIDS IVD -- IJB-C -- LFPW -- LFW -- MegaFace -- MS Celeb -- MSRA -- PubFig -- PubFig83 -- UCCS -- UMD Faces -- VGG Face 1? -- VGG Face 2 -- YouTube Celebrities -- YouTube Makeup -- Face Scrub -- Adience -- CUHK -- Kin CTTS -- LAOFIW -- UCF Selfie -- USED -- TinyFaces -- Pipa -- Shakecam - |
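The Information Supply Chain sections updated above describe a pipeline in which publicly available research citing each dataset is collected, verified, and geocoded so the results can be rendered as clickable map markers. As a rough, minimal sketch of the geocoding step only (not the project's actual tooling), the Python example below reads a hypothetical citations.csv with "title" and "institution" columns, resolves each institution to coordinates with geopy's Nominatim geocoder, and writes GeoJSON markers; the file names, column names, and choice of geocoder are all assumptions.

```python
# Minimal sketch: geocode dataset-citation institutions into GeoJSON markers.
# Hypothetical inputs/outputs: citations.csv ("title", "institution") -> citations.geojson.
# Requires: pip install geopy
import csv
import json
import time

from geopy.geocoders import Nominatim

geocoder = Nominatim(user_agent="dataset-citation-map")

features = []
with open("citations.csv", newline="", encoding="utf-8") as f:
    for row in csv.DictReader(f):
        location = geocoder.geocode(row["institution"])
        time.sleep(1)  # stay within Nominatim's ~1 request/second usage policy
        if location is None:
            continue  # skip institutions that cannot be resolved to coordinates
        features.append({
            "type": "Feature",
            "geometry": {
                "type": "Point",
                "coordinates": [location.longitude, location.latitude],
            },
            "properties": {"title": row["title"], "institution": row["institution"]},
        })

with open("citations.geojson", "w", encoding="utf-8") as f:
    json.dump({"type": "FeatureCollection", "features": features}, f, indent=2)
```

A map front end could then load the resulting GeoJSON and attach the title and institution properties to each marker's click handler, which is how the "click on the markers" behavior described above would typically be wired up.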
