Diffstat (limited to 'megapixels/commands/datasets')
-rw-r--r--  megapixels/commands/datasets/50people.py                 129
-rw-r--r--  megapixels/commands/datasets/feret.py                    139
-rw-r--r--  megapixels/commands/datasets/filter_by_pose.py            96
-rw-r--r--  megapixels/commands/datasets/gen_filepath.py             102
-rw-r--r--  megapixels/commands/datasets/gen_uuid.py                  65
-rw-r--r--  megapixels/commands/datasets/identity_meta_lfw.py         93
-rw-r--r--  megapixels/commands/datasets/identity_meta_vgg_face2.py   88
-rw-r--r--  megapixels/commands/datasets/lookup.py                    63
-rw-r--r--  megapixels/commands/datasets/megaface_flickr_api.py      141
-rw-r--r--  megapixels/commands/datasets/megaface_names.py            65
-rw-r--r--  megapixels/commands/datasets/records.py                  167
-rw-r--r--  megapixels/commands/datasets/s3_sync.py                   61
-rw-r--r--  megapixels/commands/datasets/symlink_uuid.py              57
-rw-r--r--  megapixels/commands/datasets/vecs_to_id.py                50
-rw-r--r--  megapixels/commands/datasets/vecs_to_uuid.py              56
-rw-r--r--  megapixels/commands/datasets/ytmu.py                     205
16 files changed, 1577 insertions, 0 deletions
diff --git a/megapixels/commands/datasets/50people.py b/megapixels/commands/datasets/50people.py
new file mode 100644
index 00000000..fb35b2fe
--- /dev/null
+++ b/megapixels/commands/datasets/50people.py
@@ -0,0 +1,129 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--media', 'opt_dir_media',
+  help='Directory of downloaded videos')
+@click.option('--action', 'opt_action',
+  type=click.Choice(['download', 'face_frames']),
+  default='download',
+  help='Command action')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_action, opt_slice):
+ """YTMU utils"""
+
+
+ from tqdm import tqdm
+
+ # -------------------------------------------------
+ # process
+
+  if opt_action == 'download':
+    # downloads video files with ytdl
+    handle_download(opt_fp_in, opt_fp_out, opt_slice)
+  elif opt_action == 'face_frames':
+    # extracts frames containing faces from the downloaded videos
+    handle_face_frames(opt_fp_in, opt_fp_out, opt_dir_media)
+
+
+
+
+
+def handle_face_frames(fp_in, dir_out, dir_videos):
+ if not dir_out or not dir_videos:
+ log.error('-o/--output and --videos required')
+ return
+
+ import cv2 as cv
+ from tqdm import tqdm
+ from app.processors import face_detector
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+
+ face_interval = 30
+ frame_interval_count = 0
+ frame_count = 0
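+  # save at most one face frame per face_interval frames;
+  # frame_count only builds unique output filenames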
+
+ file_utils.mkdirs(dir_out)
+
+ for fp_video in tqdm(fp_videos):
+ # log.debug('opening: {}'.format(fp_video))
+ video = cv.VideoCapture(fp_video)
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ # save frame
+ fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count)))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+
+
+def handle_download(fp_in, dir_out, opt_slice):
+ import youtube_dl
+ df = pd.read_csv(fp_in)
+ if opt_slice:
+ df = df[opt_slice[0]:opt_slice[1]]
+ df = df.fillna('')
+ fp_videos = glob(join(dir_out, '*.mp4'))
+ fp_videos += glob(join(dir_out, '*.webm'))
+ fp_videos += glob(join(dir_out, '*.mkv'))
+
+ ydl = youtube_dl.YoutubeDL({'outtmpl': join(dir_out, '') + '%(id)s.%(ext)s'})
+
+ for i, row in df.iterrows():
+    # prefer the YouTube ID; fall back to the Vimeo ID
+    vid = str(row['youtube_id'])
+    if vid:
+      url = 'https://youtube.com/watch?v={}'.format(vid)
+    else:
+      vid = row['vimeo_id']
+      if vid:
+        vid = str(int(vid))  # vimeo ids are numeric in the CSV
+        url = 'https://vimeo.com/{}'.format(vid)
+      else:
+        log.warn('no video id for {}'.format(row['city']))
+        continue
+
+ found = False
+    for fp_video in fp_videos:
+      if vid in fp_video:
+        # already downloaded; skip
+        found = True
+        break
+
+ if not found:
+ try:
+ with ydl:
+ ydl.download([url])
+      except Exception:
+ log.error('could not dl: {}'.format(vid))
diff --git a/megapixels/commands/datasets/feret.py b/megapixels/commands/datasets/feret.py
new file mode 100644
index 00000000..906b4e37
--- /dev/null
+++ b/megapixels/commands/datasets/feret.py
@@ -0,0 +1,139 @@
+import bz2
+import io
+
+import click
+from PIL import Image
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+pose_choices = {
+'fa':0, 'fb':0, 'hl':67.5, 'hr':-67.5, 'pl':90, 'pr':-90,
+'ql':22.5, 'qr':-22.5, 'ra':45, 'rb':15, 'rc':-15, 'rd':-45, 're':-75}
+
+poses_left = ['hl', 'ql', 'pl', 'ra', 'rb']
+poses_right = ['hr', 'qr', 'pr', 'rc', 'rd', 're']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output directory')
+@click.option('-a', '--angle', 'opt_angle', type=(float, float), default=(0,0),
+ help='Min/max face angles')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.option('--flip', 'opt_flip', type=click.Choice(['r', 'l']),
+ help='Flip profile images to the R or L')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_angle, opt_threads, opt_flip):
+ """Extracts FERET images"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+
+ from PIL import ImageOps
+ from app.utils import file_utils
+
+ # filter angles
+ poses = [k for k, v in pose_choices.items() if \
+ abs(v) >= opt_angle[0] and abs(v) <= opt_angle[1]]
+
+ # glob images dir for all *ppm.bz2
+ fp_ims = []
+ for pose in poses:
+ log.info('globbing pose: {}'.format(pose))
+    fp_ims += glob(join(opt_fp_in, '**/*_{}.ppm.bz2'.format(pose)), recursive=True)
+ log.info('Processing: {:,} files'.format(len(fp_ims)))
+
+ # convert bz2 to png
+ def pool_func(fp_im, opt_fp_out, opt_flip):
+ try:
+ pbar.update(1)
+ im_pil = bz2_to_pil(fp_im)
+ fpp_im = Path(fp_im)
+ fp_out = join(opt_fp_out, '{}.png'.format(fpp_im.stem))
+ fp_out = fp_out.replace('.ppm','') # remove ppm
+ if opt_flip:
+ pose_code = fpp_im.stem.split('_')[-1][:2]
+ # log.debug('opt_flip: {}, found: {}'.format(opt_flip, pose_code))
+ if opt_flip == 'r' and pose_code in poses_right \
+ or opt_flip == 'l' and pose_code in poses_left:
+ im_pil = ImageOps.mirror(im_pil)
+ im_pil.save(fp_out)
+ return True
+ except Exception as e:
+ log.error('Error processing: {}, error: {}'.format(fp_im, e))
+ return False
+
+ # make output directory
+ file_utils.mkdirs(opt_fp_out)
+
+ # setup multithreading
+  pool_resize = partial(pool_func, opt_fp_out=opt_fp_out, opt_flip=opt_flip)
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    results = pool.map(pool_resize, fp_ims)
+
+ # results
+ log.info('Converted: {} / {} images'.format(results.count(True), len(fp_ims)))
+
+
+# ------------------------------------------------------------------
+# local utils
+
+def bz2_to_pil(fp_src):
+ with open(fp_src, 'rb') as fp:
+ im_raw = bz2.decompress(fp.read())
+ im_pil = Image.open(io.BytesIO(im_raw))
+ return im_pil
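+# Example usage (hypothetical path, FERET-style filename):
+#   im = bz2_to_pil('/data/feret/00001/00001_930831_fa.ppm.bz2')
+#   im.save('00001_930831_fa.png')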
+
+
+
+"""
+
+A breakdown of the images by pose is:
+ Pose Angle Images Subjects
+ fa 0 1364 994
+ fb 0 1358 993
+ hl +67.5 1267 917
+ hr -67.5 1320 953
+ pl +90 1312 960
+ pr -90 1363 994
+ ql +22.5 761 501
+ qr -22.5 761 501
+ ra +45 321 261
+ rb +15 321 261
+ rc -15 610 423
+ rd -45 290 236
+ re -75 290 236
+
+ There are 13 different poses. (The orientation "right" means
+facing the photographer's right.)
+ fa regular frontal image
+ fb alternative frontal image, taken shortly after the
+ corresponding fa image
+ pl profile left
+ hl half left - head turned about 67.5 degrees left
+ ql quarter left - head turned about 22.5 degrees left
+ pr profile right
+ hr half right - head turned about 67.5 degrees right
+ qr quarter right - head turned about 22.5 degrees right
+ ra random image - head turned about 45 degree left
+ rb random image - head turned about 15 degree left
+ rc random image - head turned about 15 degree right
+ rd random image - head turned about 45 degree right
+ re random image - head turned about 75 degree right
+
+""" \ No newline at end of file
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
new file mode 100644
index 00000000..a588b18e
--- /dev/null
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -0,0 +1,96 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--yaw', 'opt_yaw', type=(float, float), default=cfg.POSE_MINMAX_YAW,
+ help='Yaw (min, max)')
+@click.option('--roll', 'opt_roll', type=(float, float), default=cfg.POSE_MINMAX_ROLL,
+ help='Roll (min, max)')
+@click.option('--pitch', 'opt_pitch', type=(float, float), default=cfg.POSE_MINMAX_PITCH,
+ help='Pitch (min, max)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_roll, opt_pitch):
+ """Filter out exaggerated poses"""
+
+ import sys
+ from os.path import join
+ from pathlib import Path
+ import shutil
+ from datetime import datetime
+
+ import pandas as pd
+ from tqdm import tqdm
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils
+
+ # create date store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # load pose
+ fp_pose = data_store.metadata(types.Metadata.FACE_POSE)
+ df_pose = pd.read_csv(fp_pose).set_index('index')
+ # load roi
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # load filepath
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # debug
+ log.info('Processing {:,} rows'.format(len(df_pose)))
+ n_rows = len(df_record)
+
+ # filter out extreme poses
+ invalid_indices = []
+ for ds_pose in tqdm(df_pose.itertuples(), total=len(df_pose)):
+    # a face is invalid if any of yaw/roll/pitch falls outside its (min, max) range
+    if not (opt_yaw[0] <= ds_pose.yaw <= opt_yaw[1]) \
+        or not (opt_roll[0] <= ds_pose.roll <= opt_roll[1]) \
+        or not (opt_pitch[0] <= ds_pose.pitch <= opt_pitch[1]):
+      invalid_indices.append(ds_pose.Index) # unique file indices
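+  # e.g. with --yaw -45 45, a face with yaw 60 is dropped even if roll and pitch are in range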
+
+ # filter out valid/invalid
+ log.info(f'indices 0-20: {invalid_indices[:20]}')
+ log.info(f'Removing {len(invalid_indices)} invalid indices...')
+ df_record = df_record.drop(df_record.index[invalid_indices])
+ df_roi = df_roi.drop(df_roi.index[invalid_indices])
+ df_pose = df_pose.drop(df_pose.index[invalid_indices])
+ log.info(f'Removed {n_rows - len(df_record)}')
+
+ # move file to make backup
+  dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%H%M%S}')
+ file_utils.mkdirs(dir_bkup)
+ # move files to backup
+ shutil.move(fp_record, join(dir_bkup, Path(fp_record).name))
+ shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
+ shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
+ # resave file records
+ df_record = df_record.reset_index(drop=True)
+ df_record.index.name = 'index'
+ df_record.to_csv(fp_record)
+ # resave ROI
+ df_roi = df_roi.reset_index(drop=True)
+ df_roi.index.name = 'index'
+ df_roi.to_csv(fp_roi)
+ # resave pose
+ df_pose = df_pose.reset_index(drop=True)
+ df_pose.index.name = 'index'
+ df_pose.to_csv(fp_pose)
diff --git a/megapixels/commands/datasets/gen_filepath.py b/megapixels/commands/datasets/gen_filepath.py
new file mode 100644
index 00000000..5db405c0
--- /dev/null
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -0,0 +1,102 @@
+"""
+Begin with this file to process folder of images
+- Converts folders and subdirectories into CSV with file attributes split
+"""
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in',
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.option('-t', '--threads', 'opt_threads', default=4,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
+ opt_recursive, opt_threads, opt_force):
+ """Multithreading test"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+
+ import pandas as pd
+ from tqdm import tqdm
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils, im_utils
+
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = opt_fp_out if opt_fp_out is not None else data_store.metadata(types.Metadata.FILEPATH)
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+
+ # glob files
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+ fp_ims = []
+ log.info(f'Globbing {fp_in}')
+ for ext in ['jpg', 'png']:
+ if opt_recursive:
+ fp_glob = join(fp_in, '**/*.{}'.format(ext))
+ fp_ims += glob(fp_glob, recursive=True)
+ else:
+ fp_glob = join(fp_in, '*.{}'.format(ext))
+ fp_ims += glob(fp_glob)
+
+ if not fp_ims:
+ log.warn('No images. Try with "--recursive"')
+ return
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+
+ log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+ # convert data to dict
+ data = []
+ for i, fp_im in enumerate(tqdm(fp_ims)):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(fp_in))
+ data.append( {
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.','')
+ })
+
+ # save to CSV
+ file_utils.mkdirs(fp_out)
+ df_filepath = pd.DataFrame.from_dict(data)
+ df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True)
+  df_filepath = df_filepath.reset_index(drop=True)
+ df_filepath.index.name = 'index'
+ df_filepath.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
new file mode 100644
index 00000000..d7e7b52c
--- /dev/null
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -0,0 +1,65 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
+ """Appends UUID to records CSV"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import base64
+ import uuid
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.models.data_store import DataStore
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.UUID) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # load sha256 records
+ fp_in = data_store.metadata(types.Metadata.SHA256) if opt_fp_in is None else opt_fp_in
+ log.info(f'Loading: {fp_in}')
+ df_records = pd.read_csv(fp_in).set_index('index')
+
+ df_uuids = df_records.copy()
+  # one random UUID per image record
+  df_uuids['uuid'] = [uuid.uuid4() for _ in range(len(df_uuids))]
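+  # uuid4 example: '0b2e7c4e-9d3a-4f3e-8c1d-6f2a9b7e4d10' (random, not derived from the image)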
+
+ df_uuids = df_uuids.drop(['sha256', 'identity_index'], axis=1)
+ df_uuids.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/identity_meta_lfw.py b/megapixels/commands/datasets/identity_meta_lfw.py
new file mode 100644
index 00000000..45386b23
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_lfw.py
@@ -0,0 +1,93 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--column', 'opt_identity_key', default='identity_key',
+ help='Match column')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_identity_key, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.LFW
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # init dataset
+ # load file records
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+  # the identity meta file is typically prepared separately (e.g. in a Jupyter notebook)
+  # and must contain the "identity_key" column used for matching
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ identity_indices = []
+ for record_idx, ds_record in tqdm(df_record.iterrows(), total=len(df_record)):
+ identity_value = ds_record[opt_identity_key]
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[opt_identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+ '''
+ index,name,name_orig,description,gender,images,image_index,identity_key
+ 0,A. J. Cook,AJ Cook,Canadian actress,f,1,0,AJ_Cook
+ '''
+
+
diff --git a/megapixels/commands/datasets/identity_meta_vgg_face2.py b/megapixels/commands/datasets/identity_meta_vgg_face2.py
new file mode 100644
index 00000000..85b6644d
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_vgg_face2.py
@@ -0,0 +1,88 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.VGG_FACE2
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # init dataset
+ # load file records
+ identity_key = 'identity_key'
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+  # the identity meta file is typically prepared separately (e.g. in a Jupyter notebook)
+  # and must contain the "identity_key" column used for matching
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ identity_indices = []
+ for ds_record in tqdm(df_record.itertuples(), total=len(df_record)):
+ identity_value = ds_record.identity_key
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
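+  # identity.csv columns (sketch): index,description,name,images,gender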
+
+
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
new file mode 100644
index 00000000..5ae4c3f5
--- /dev/null
+++ b/megapixels/commands/datasets/lookup.py
@@ -0,0 +1,63 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('--index', 'opt_index', type=int, required=True,
+ help='File index to lookup')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+ # init dataset
+ dataset = Dataset(opt_data_store, opt_dataset)
+ #dataset.load_face_vectors()
+ dataset.load_records()
+ dataset.load_identities()
+ # set data store and load files
+ # get image record from file index
+ image_record = dataset.index_to_record(opt_index)
+ image_record.summarize()
+ # load image
+ im = cv.imread(image_record.filepath)
+ # display
+ cv.imshow('', im)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+    if k == 27 or k == ord('q'): # ESC or q to quit
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
diff --git a/megapixels/commands/datasets/megaface_flickr_api.py b/megapixels/commands/datasets/megaface_flickr_api.py
new file mode 100644
index 00000000..62232ab8
--- /dev/null
+++ b/megapixels/commands/datasets/megaface_flickr_api.py
@@ -0,0 +1,141 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-d', '--delay', 'opt_delay', default=None, type=int,
+ help='Delay between API calls to prevent rate-limiting')
+@click.option('--checkpoints', 'opt_checkpoints', is_flag=True,
+ help='Save checkpoints')
+@click.option('--api_key', 'opt_api_key', envvar='FLICKR_API_KEY')
+@click.option('--api_secret', 'opt_api_secret', envvar='FLICKR_API_SECRET')
+@click.option('--checkpoint_interval', 'opt_ckpt_interval', default=10000,
+ help='Save checkpoint interval')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_api_key, opt_api_secret,
+ opt_delay, opt_checkpoints, opt_ckpt_interval):
+ """Appends Flickr API info to CSV"""
+
+ from tqdm import tqdm
+ from glob import glob
+ import time
+ import flickr_api # pip install flickr_api
+ from flickr_api.flickrerrors import FlickrAPIError
+
+ # -------------------------------------------------
+ # process
+
+ if not opt_api_key or not opt_api_secret:
+ log.error('source .env vars for Flickr API and try again')
+ return
+
+ # init Flickr API
+ flickr_api.set_keys(api_key=opt_api_key, api_secret=opt_api_secret)
+
+  # read in the CSV
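+  # the input CSV must provide an 'nsid' column (Flickr user id, e.g. '7124086@N07')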
+ df_ids = pd.read_csv(opt_fp_in)
+ if opt_slice:
+ df_ids = df_ids[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing: {:,} items'.format(len(df_ids)))
+
+ # iterate MegaFace IDs
+ identities = []
+
+ tqdm.pandas()
+
+ for idx, df_id in tqdm(df_ids.iterrows(), total=len(df_ids)):
+ # a = flickr_api.Person(id='123456789@N01')
+ df_id_dict = dict(df_id)
+
+ # append relevant data
+ try:
+ person = flickr_api.Person(id=df_id['nsid'])
+ info = person.getInfo()
+ df_id_dict.update( {
+ 'user_name': info.get('username', ''),
+ 'location': info.get('location', ''),
+ 'real_name': info.get('realname', ''),
+ 'time_zone': info.get('timezone', {}).get('timezone_id', ''),
+ 'time_first_photo': info.get('photos_info', {}).get('firstdatetaken'),
+ 'photos_count': info.get('photos_info', {}).get('count'),
+ 'description': info.get('description', ''),
+ 'id': info.get('id'),
+ 'path_alias': info.get('path_alias', ''),
+ 'is_pro': info.get('ispro', ''),
+ 'url_photos': info.get('photosurl', ''),
+        'url_profile': info.get('profileurl', ''),
+ 'url_mobile': info.get('mobileurl', ''),
+ })
+ identities.append(df_id_dict)
+
+ except FlickrAPIError as e:
+ log.error(e)
+
+
+ if opt_checkpoints:
+ if (idx + 1) % opt_ckpt_interval == 0:
+ df = pd.DataFrame.from_dict(identities)
+ fpp_out = Path(opt_fp_out)
+ opt_fp_out_ckpt = join(fpp_out.parent, '{}_ckpt_{}.csv'.format(fpp_out.stem, file_utils.zpad(idx + 1)))
+ log.info('Saving checkpoint {:,} to {}'.format(idx + 1, opt_fp_out_ckpt))
+ df.to_csv(opt_fp_out_ckpt, index=False)
+
+ if opt_delay:
+ time.sleep(opt_delay)
+
+
+ df = pd.DataFrame.from_dict(identities)
+ df.to_csv(opt_fp_out, index=False)
+
+ log.info('Wrote: {:,} lines to {}'.format(len(df), opt_fp_out))
+
+
+"""
+Example API data:
+{'id': '7124086@N07',
+ 'nsid': '7124086@N07',
+ 'ispro': 1,
+ 'can_buy_pro': 0,
+ 'iconserver': '2325',
+ 'iconfarm': 3,
+ 'path_alias': 'shirleylin',
+ 'has_stats': '1',
+ 'pro_badge': 'standard',
+ 'expire': '0',
+ 'username': 'ShirleyLin',
+ 'realname': 'Shirley Lin',
+ 'location': 'Fremont, California, US',
+ 'timezone': {'label': 'Pacific Time (US & Canada); Tijuana',
+ 'offset': '-08:00',
+ 'timezone_id': 'PST8PDT'},
+ 'description': '',
+ 'photosurl': 'https://www.flickr.com/photos/shirleylin/',
+ 'profileurl': 'https://www.flickr.com/people/shirleylin/',
+ 'mobileurl': 'https://m.flickr.com/photostream.gne?id=7102756',
+ 'photos_info': {'firstdatetaken': '2004-05-24 12:12:15',
+ 'firstdate': '1172556588',
+ 'count': 9665}}
+""" \ No newline at end of file
diff --git a/megapixels/commands/datasets/megaface_names.py b/megapixels/commands/datasets/megaface_names.py
new file mode 100644
index 00000000..01e93e2d
--- /dev/null
+++ b/megapixels/commands/datasets/megaface_names.py
@@ -0,0 +1,65 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out):
+ """Creates CSV of NSIDs from MegaFace"""
+
+ from tqdm import tqdm
+ from glob import glob
+
+ # -------------------------------------------------
+ # process
+ fp_im_dirs = glob(join(opt_fp_in, '**/'), recursive=True)
+
+ log.info('Found {} directories'.format(len(fp_im_dirs)))
+
+ identities = {}
+
+ for fp_im_dir in tqdm(fp_im_dirs):
+ # 1234567@N05_identity_1
+ try:
+ dir_id_name = Path(fp_im_dir).name
+ nsid = dir_id_name.split('_')[0]
+ identity_num = dir_id_name.split('_')[2]
+ id_key = '{}_{}'.format(nsid, identity_num)
+ num_images = len(glob(join(fp_im_dir, '*.jpg')))
+      if id_key not in identities:
+ identities[id_key] = {'nsid': nsid, 'identity': identity_num, 'images': num_images}
+ else:
+ identities[id_key]['images'] += num_images
+    except Exception as e:
+      log.warn('could not parse identity dir: {} ({})'.format(fp_im_dir, e))
+      continue
+
+  # flatten the identities dict into a list of rows
+ identities_list = [v for k, v in identities.items()]
+ df = pd.DataFrame.from_dict(identities_list)
+
+  file_utils.mkdirs(opt_fp_out)
+  df.to_csv(opt_fp_out, index=False)
+  log.info('Wrote {} lines to {}'.format(len(df), opt_fp_out))
+
+
diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
new file mode 100644
index 00000000..b6ef618b
--- /dev/null
+++ b/megapixels/commands/datasets/records.py
@@ -0,0 +1,167 @@
+'''
+Generates the file records CSV (sha256, uuid, identity index) for a dataset
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default=None, type=click.Choice(identity_sources),
+ help='Identity source, blank for no identity')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+ opt_identity, opt_force, opt_recursive):
+ """Generates sha256, uuid, and identity index CSV file"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+ import uuid
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils, im_utils
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # ----------------------------------------------------------------
+ # glob files
+
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+ log.info(f'Globbing {fp_in}')
+ fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+ # fail if none
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ # slice to reduce
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+ # ----------------------------------------------------------------
+ # multithread process into SHA256
+
+  def as_sha256(fp_im):
+    pbar.update(1)
+    return file_utils.sha256(fp_im)
+
+  # hash all files in a thread pool
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    sha256s = pool.map(as_sha256, fp_ims)
+
+
+ # ----------------------------------------------------------------
+ # convert data to dict
+
+ data = []
+  identity_count = 0
+ for sha256, fp_im in zip(sha256s, fp_ims):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(fp_in))
+
+
+    if opt_identity:
+      subdirs = subdir.split('/')
+      if not subdirs or not subdirs[0]:
+        log.error(f'Could not split subdir: "{subdir}". Try a different option for "--identity"')
+        return
+      if opt_identity in ('subdir', 'subdir_head'):
+        identity = subdirs[0] # use the first part of the subdir path
+      elif opt_identity == 'subdir_tail':
+        identity = subdirs[-1] # use the last part of the subdir path
+    else:
+      identity = identity_count # no identity source: use an incrementing number
+      identity_count += 1
+
+ data.append({
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.',''),
+ 'sha256': sha256,
+ 'uuid': uuid.uuid4(),
+ 'identity_key': identity
+ })
+
+ df_records = pd.DataFrame.from_dict(data)
+ if opt_identity:
+ log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+ # convert dict to DataFrame
+ # sort based on identity_key
+ df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+ # add new column for identity
+ df_records['identity_index'] = [-1] * len(df_records)
+ # populate the identity_index
+ df_records_identity_groups = df_records.groupby('identity_key')
+ # enumerate groups to create identity indices
+ for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+ identity_key, df_records_identity_group = df_records_identity_group_tuple
+ for ds_record in df_records_identity_group.itertuples():
+ df_records.at[ds_record.Index, 'identity_index'] = identity_index
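+    # note: pandas can derive the same mapping in one call (assuming a recent pandas):
+    #   df_records['identity_index'] = df_records.groupby('identity_key').ngroup()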
+ # reset index after being sorted
+ df_records = df_records.reset_index(drop=True)
+  else:
+    # without an identity source, identity_key is already a unique number per record
+    pass
+
+ df_records.index.name = 'index' # reassign 'index' as primary key column
+ # write to CSV
+ file_utils.mkdirs(fp_out)
+ df_records.to_csv(fp_out)
+ # done
+ log.info(f'wrote rows: {len(df_records)} to {fp_out}') \ No newline at end of file
diff --git a/megapixels/commands/datasets/s3_sync.py b/megapixels/commands/datasets/s3_sync.py
new file mode 100644
index 00000000..17940c6d
--- /dev/null
+++ b/megapixels/commands/datasets/s3_sync.py
@@ -0,0 +1,61 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+s3_dirs = {'media': cfg.S3_MEDIA_URL, 'metadata': cfg.S3_METADATA_URL}
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
+ help='S3 location')
+@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
+@click.pass_context
+def cli(ctx, opt_data_store, opt_dataset, opt_type, opt_dryrun):
+ """Syncs files with S3/spaces server"""
+
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+ import subprocess
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ dataset_name = opt_dataset.name.lower()
+ if opt_type == 'media':
+ dir_src = join(data_store.uuid_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+ elif opt_type == 'metadata':
+ dir_src = join(data_store.metadata_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+
+ cmd = ['s3cmd', 'sync', dir_src, dir_dst, '-P', '--follow-symlinks']
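+  # resulting command (sketch): s3cmd sync .../media/uuid/ s3://megapixels/v1/media/<dataset>/ -P --follow-symlinks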
+ log.info(' '.join(cmd))
+ if not opt_dryrun:
+ subprocess.call(cmd)
+
+
+'''
+upload: '/data_store_ssd/datasets/people/vgg_face2/media/uuid/00418e0e-48e9-44f9-b6a0-b2ffd773802e.jpg' -> 's3://megapixels/v1/media/vgg_face2/00418e0e-48e9-44f9-b6a0-b2ffd773802e.jpg' [3202 of 3187313]
+[2953 of 3187313]
+''' \ No newline at end of file
diff --git a/megapixels/commands/datasets/symlink_uuid.py b/megapixels/commands/datasets/symlink_uuid.py
new file mode 100644
index 00000000..7c5faa95
--- /dev/null
+++ b/megapixels/commands/datasets/symlink_uuid.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override output directory for symlinks')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset):
+ """Symlinks images to new directory for S3"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records).set_index('index')
+ nrows = len(df_records)
+
+ dir_out = data_store.uuid_dir() if opt_fp_out is None else opt_fp_out
+ file_utils.mkdirs(dir_out)
+
+ for ds_record in tqdm(df_records.itertuples(), total=nrows):
+ # make image path
+ fp_src = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_dst = data_store.face_uuid(ds_record.uuid, ds_record.ext)
+    if not Path(fp_dst).is_symlink():
+      Path(fp_dst).symlink_to(Path(fp_src))
+
+ log.info('symlinked {:,} files'.format(nrows)) \ No newline at end of file
diff --git a/megapixels/commands/datasets/vecs_to_id.py b/megapixels/commands/datasets/vecs_to_id.py
new file mode 100644
index 00000000..07c7389e
--- /dev/null
+++ b/megapixels/commands/datasets/vecs_to_id.py
@@ -0,0 +1,50 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-r', '--records', 'opt_fp_records', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output JSON')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_records, opt_fp_out,opt_force):
+ """Merges ID with face vectors"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ df_vecs = pd.read_csv(opt_fp_in)
+ df_records = pd.read_csv(opt_fp_records)
+ nrows = len(df_vecs)
+
+ # face vecs
+ id_vecs = {}
+
+ for roi_idx, row in tqdm(df_vecs.iterrows(), total=nrows):
+ record_id = int(row['id'])
+ vec = row['vec'].split(',')
+ id_vecs[record_id] = vec
+
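+  # id_vecs maps record id -> face vector; JSON serialization will stringify the int keys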
+ # save as JSON
+ file_utils.write_json(id_vecs, opt_fp_out, verbose=True)
+
+ \ No newline at end of file
diff --git a/megapixels/commands/datasets/vecs_to_uuid.py b/megapixels/commands/datasets/vecs_to_uuid.py
new file mode 100644
index 00000000..7bb82083
--- /dev/null
+++ b/megapixels/commands/datasets/vecs_to_uuid.py
@@ -0,0 +1,56 @@
+"""
+Maps face vectors to record UUIDs and exports them as JSON
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-r', '--records', 'opt_fp_records', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output JSON')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_records, opt_fp_out,opt_force):
+ """Merges UUID with face vectors"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ df_vecs = pd.read_csv(opt_fp_in)
+ df_records = pd.read_csv(opt_fp_records)
+ nrows = len(df_vecs)
+
+ # face vecs
+ uuid_vecs = {}
+
+ for roi_idx, row in tqdm(df_vecs.iterrows(), total=nrows):
+ # make image path
+ record_id = int(row['id'])
+ uuid = df_records.iloc[record_id]['uuid']
+ vec = row['vec'].split(',')
+ uuid_vecs[uuid] = vec
+
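+  # uuid_vecs maps uuid -> face vector, e.g. {'0b2e7c4e-...': ['0.12', '-0.05', ...]}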
+ # save as JSON
+ file_utils.write_json(uuid_vecs, opt_fp_out)
+
+ \ No newline at end of file
diff --git a/megapixels/commands/datasets/ytmu.py b/megapixels/commands/datasets/ytmu.py
new file mode 100644
index 00000000..66680ed0
--- /dev/null
+++ b/megapixels/commands/datasets/ytmu.py
@@ -0,0 +1,205 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--videos', 'opt_dir_videos',
+  help='Directory of downloaded videos')
+@click.option('--action', 'opt_action',
+  type=click.Choice(['info', 'rename', 'download', 'metadata', 'split_frames']),
+ default='info',
+ help='Command action')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_videos, opt_action):
+ """YTMU utils"""
+
+
+ from tqdm import tqdm
+
+ # -------------------------------------------------
+ # process
+
+ if opt_action == 'metadata':
+ # downloads video metadata with ytdl
+ handle_metadata(opt_fp_in, opt_fp_out)
+ elif opt_action == 'download':
+ # downloads video files with ytdl
+ handle_download(opt_fp_in, opt_fp_out)
+ elif opt_action == 'info':
+ # converts original data file to clean CSV
+    handle_info(opt_fp_in, opt_fp_out)
+ elif opt_action == 'rename':
+ # rename the videos to video ID
+ handle_rename(opt_fp_in, opt_fp_out, opt_dir_videos)
+ elif opt_action == 'split_frames':
+    # split videos into frames that contain faces
+ handle_split_frames(opt_fp_in, opt_fp_out, opt_dir_videos)
+
+
+
+
+# ----------------------------------------------------
+# handlers
+
+def handle_split_frames(fp_in, dir_out, dir_videos):
+ if not dir_out or not dir_videos:
+ log.error('-o/--output and --videos required')
+ return
+ import cv2 as cv
+ from tqdm import tqdm
+ from app.processors import face_detector
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+ face_interval = 30
+ frame_interval_count = 0
+ frame_count = 0
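+  # as in 50people.py: save at most one face frame per face_interval frames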
+
+ file_utils.mkdirs(dir_out)
+
+ for fp_video in tqdm(fp_videos):
+ # log.debug('opening: {}'.format(fp_video))
+ video = cv.VideoCapture(fp_video)
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ # save frame
+ fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count)))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+
+
+def handle_metadata(fp_in, fp_out):
+
+ keys = ['description', 'average_rating', 'dislike_count', 'categories',
+ 'thumbnail', 'title', 'upload_date', 'uploader_url', 'uploader_id',
+ 'fps', 'height', 'width', 'like_count', 'license', 'tags']
+
+ import youtube_dl
+
+  ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'})
+
+ df = pd.read_csv(fp_in)
+ data_exp = []
+
+ for i, row in df.iterrows():
+ video_data = {'url': row['url'], 'id': row['id']}
+ try:
+ with ydl:
+ url = 'http://www.youtube.com/watch?v={}'.format(row['id'])
+ result = ydl.extract_info(url, download=False)
+ video = result['entries'][0] if 'entries' in result else result
+        for k in keys:
+          val = video.get(k)
+          if k == 'title':
+            log.debug(val)
+          if isinstance(val, list):
+            val = '; '.join(str(v) for v in val)
+          if isinstance(val, str):
+            val = val.replace(',', ';') # keep the CSV delimiter clean
+          video_data[k] = val
+ # log.debug('video_data: {}'.format(video_data))
+ except Exception as e:
+      log.warn('video unavailable: {}'.format(row['url']))
+ log.error(e)
+ continue
+ data_exp.append(video_data)
+
+ df_exp = pd.DataFrame.from_dict(data_exp)
+ df_exp.to_csv(fp_out)
+
+
+def handle_download(fp_in, dir_out):
+ import youtube_dl
+ df = pd.read_csv(fp_in)
+ fp_videos = glob(join(dir_out, '*.mp4'))
+ fp_videos += glob(join(dir_out, '*.webm'))
+ fp_videos += glob(join(dir_out, '*.mkv'))
+
+  ydl = youtube_dl.YoutubeDL({'outtmpl': join(dir_out, '') + '%(id)s.%(ext)s'})
+
+ for i, row in df.iterrows():
+ vid = row['id']
+ found = False
+    for fp_video in fp_videos:
+      if vid in fp_video:
+        log.debug('skip: {}'.format(vid))
+        found = True
+        break
+ if not found:
+ try:
+ with ydl:
+ ydl.download(['http://www.youtube.com/watch?v={}'.format(vid)])
+      except Exception:
+        log.error('could not dl: {}'.format(vid))
+
+
+def handle_info(fp_in, fp_out):
+ if not fp_out:
+ log.error('--output required')
+ return
+ urls = file_utils.load_text(fp_in)
+ videos = []
+ for url in urls:
+ splits = url.split('v=')
+ try:
+ vid = splits[1]
+ vid = vid.split('&')[0]
+ videos.append({'url': url, 'id': vid})
+    except IndexError:
+ log.warn('no video id for {}'.format(url))
+ # convert to df
+ df = pd.DataFrame.from_dict(videos)
+  df.to_csv(fp_out)
+
+
+def handle_rename(fp_in, fp_out, dir_videos):
+ import shutil
+
+ if not dir_videos:
+ log.error('--videos required')
+ return
+
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+
+ df = pd.read_csv(fp_in)
+
+ for i, row in df.iterrows():
+ vid = row['id']
+    # iterate a copy so matched paths can be removed from the original list
+    fp_videos_copy = fp_videos.copy()
+    for fp_video in fp_videos_copy:
+      if vid in fp_video:
+        dst = join(dir_videos, '{}{}'.format(vid, Path(fp_video).suffix))
+        shutil.move(fp_video, dst)
+        log.debug('move {} to {}'.format(fp_video, dst))
+        fp_videos.remove(fp_video)
+        break \ No newline at end of file