Diffstat (limited to 'megapixels/commands/datasets')
-rw-r--r--  megapixels/commands/datasets/50people.py             129
-rw-r--r--  megapixels/commands/datasets/megaface_flickr_api.py  141
-rw-r--r--  megapixels/commands/datasets/megaface_names.py        65
-rw-r--r--  megapixels/commands/datasets/sha256.py                90
-rw-r--r--  megapixels/commands/datasets/ytmu.py                 205
5 files changed, 630 insertions(+), 0 deletions(-)
diff --git a/megapixels/commands/datasets/50people.py b/megapixels/commands/datasets/50people.py
new file mode 100644
index 00000000..fb35b2fe
--- /dev/null
+++ b/megapixels/commands/datasets/50people.py
@@ -0,0 +1,129 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--media', 'opt_dir_media',
+              help='Media (videos) directory')
+@click.option('--action', 'opt_action',
+              type=click.Choice(['download', 'face_frames']),
+              default='download',
+              help='Command action')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_action, opt_slice):
+  """50People dataset utils (video download and face frames)"""
+
+
+ from tqdm import tqdm
+
+ # -------------------------------------------------
+ # process
+
+ if opt_action == 'download':
+ # downloads video files with ytdl
+ handle_download(opt_fp_in, opt_fp_out, opt_slice)
+ elif opt_action == 'face_frames':
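+    # extracts frames containing faces from the downloaded videos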
+    handle_face_frames(opt_fp_in, opt_fp_out, opt_dir_media)
+
+
+
+
+
+def handle_face_frames(fp_in, dir_out, dir_videos):
+ if not dir_out or not dir_videos:
+    log.error('-o/--output and --media required')
+ return
+
+ import cv2 as cv
+ from tqdm import tqdm
+ from app.processors import face_detector
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+
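+  # save at most one face frame every face_interval frames to limit near-duplicates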
+ face_interval = 30
+ frame_interval_count = 0
+ frame_count = 0
+
+ file_utils.mkdirs(dir_out)
+
+ for fp_video in tqdm(fp_videos):
+ # log.debug('opening: {}'.format(fp_video))
+ video = cv.VideoCapture(fp_video)
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ # save frame
+ fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count)))
+ cv.imwrite(fp_frame, frame)
+        frame_interval_count = 0
+    video.release()
+
+
+def handle_download(fp_in, dir_out, opt_slice):
+ import youtube_dl
+ df = pd.read_csv(fp_in)
+ if opt_slice:
+ df = df[opt_slice[0]:opt_slice[1]]
+ df = df.fillna('')
+ fp_videos = glob(join(dir_out, '*.mp4'))
+ fp_videos += glob(join(dir_out, '*.webm'))
+ fp_videos += glob(join(dir_out, '*.mkv'))
+
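+  # name files by video ID so existing downloads are detected on re-runs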
+ ydl = youtube_dl.YoutubeDL({'outtmpl': join(dir_out, '') + '%(id)s.%(ext)s'})
+
+  for i, row in df.iterrows():
+    vid = str(row['youtube_id'])
+    if vid:
+      url = 'https://youtube.com/watch?v={}'.format(vid)
+    else:
+      vid = row['vimeo_id']
+      if vid:
+        vid = str(int(vid))  # vimeo IDs are numeric; strip float formatting from the CSV
+        url = 'https://vimeo.com/{}'.format(vid)
+    if not vid:
+      log.warn('no video id for {}'.format(row['city']))
+      continue
+
+    found = False
+    for fp_video in fp_videos:
+      if vid in fp_video:
+        # log.debug('skip: {}'.format(vid))
+        found = True
+        break
+
+    if not found:
+      try:
+        with ydl:
+          ydl.download([url])
+      except Exception:
+        log.error('could not dl: {}'.format(vid))
diff --git a/megapixels/commands/datasets/megaface_flickr_api.py b/megapixels/commands/datasets/megaface_flickr_api.py
new file mode 100644
index 00000000..62232ab8
--- /dev/null
+++ b/megapixels/commands/datasets/megaface_flickr_api.py
@@ -0,0 +1,141 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-d', '--delay', 'opt_delay', default=None, type=int,
+ help='Delay between API calls to prevent rate-limiting')
+@click.option('--checkpoints', 'opt_checkpoints', is_flag=True,
+ help='Save checkpoints')
+@click.option('--api_key', 'opt_api_key', envvar='FLICKR_API_KEY',
+              help='Flickr API key (or set FLICKR_API_KEY)')
+@click.option('--api_secret', 'opt_api_secret', envvar='FLICKR_API_SECRET',
+              help='Flickr API secret (or set FLICKR_API_SECRET)')
+@click.option('--checkpoint_interval', 'opt_ckpt_interval', default=10000,
+ help='Save checkpoint interval')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_api_key, opt_api_secret,
+ opt_delay, opt_checkpoints, opt_ckpt_interval):
+ """Appends Flickr API info to CSV"""
+
+ from tqdm import tqdm
+ import time
+ import flickr_api # pip install flickr_api
+ from flickr_api.flickrerrors import FlickrAPIError
+
+ # -------------------------------------------------
+ # process
+
+ if not opt_api_key or not opt_api_secret:
+ log.error('source .env vars for Flickr API and try again')
+ return
+
+ # init Flickr API
+ flickr_api.set_keys(api_key=opt_api_key, api_secret=opt_api_secret)
+
+  # read input CSV of MegaFace NSIDs
+ df_ids = pd.read_csv(opt_fp_in)
+ if opt_slice:
+ df_ids = df_ids[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing: {:,} items'.format(len(df_ids)))
+
+ # iterate MegaFace IDs
+ identities = []
+
+ tqdm.pandas()
+
+ for idx, df_id in tqdm(df_ids.iterrows(), total=len(df_ids)):
+ # a = flickr_api.Person(id='123456789@N01')
+ df_id_dict = dict(df_id)
+
+ # append relevant data
+ try:
+ person = flickr_api.Person(id=df_id['nsid'])
+ info = person.getInfo()
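+      # getInfo() returns a dict; see the example payload at the bottom of this file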
+ df_id_dict.update( {
+ 'user_name': info.get('username', ''),
+ 'location': info.get('location', ''),
+ 'real_name': info.get('realname', ''),
+ 'time_zone': info.get('timezone', {}).get('timezone_id', ''),
+ 'time_first_photo': info.get('photos_info', {}).get('firstdatetaken'),
+ 'photos_count': info.get('photos_info', {}).get('count'),
+ 'description': info.get('description', ''),
+ 'id': info.get('id'),
+ 'path_alias': info.get('path_alias', ''),
+ 'is_pro': info.get('ispro', ''),
+ 'url_photos': info.get('photosurl', ''),
+        'url_profile': info.get('profileurl', ''),
+ 'url_mobile': info.get('mobileurl', ''),
+ })
+ identities.append(df_id_dict)
+
+ except FlickrAPIError as e:
+ log.error(e)
+
+
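+    # periodically write partial results in case the crawl is interrupted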
+ if opt_checkpoints:
+ if (idx + 1) % opt_ckpt_interval == 0:
+ df = pd.DataFrame.from_dict(identities)
+ fpp_out = Path(opt_fp_out)
+ opt_fp_out_ckpt = join(fpp_out.parent, '{}_ckpt_{}.csv'.format(fpp_out.stem, file_utils.zpad(idx + 1)))
+ log.info('Saving checkpoint {:,} to {}'.format(idx + 1, opt_fp_out_ckpt))
+ df.to_csv(opt_fp_out_ckpt, index=False)
+
+ if opt_delay:
+ time.sleep(opt_delay)
+
+
+ df = pd.DataFrame.from_dict(identities)
+ df.to_csv(opt_fp_out, index=False)
+
+ log.info('Wrote: {:,} lines to {}'.format(len(df), opt_fp_out))
+
+
+"""
+Example API data:
+{'id': '7124086@N07',
+ 'nsid': '7124086@N07',
+ 'ispro': 1,
+ 'can_buy_pro': 0,
+ 'iconserver': '2325',
+ 'iconfarm': 3,
+ 'path_alias': 'shirleylin',
+ 'has_stats': '1',
+ 'pro_badge': 'standard',
+ 'expire': '0',
+ 'username': 'ShirleyLin',
+ 'realname': 'Shirley Lin',
+ 'location': 'Fremont, California, US',
+ 'timezone': {'label': 'Pacific Time (US & Canada); Tijuana',
+ 'offset': '-08:00',
+ 'timezone_id': 'PST8PDT'},
+ 'description': '',
+ 'photosurl': 'https://www.flickr.com/photos/shirleylin/',
+ 'profileurl': 'https://www.flickr.com/people/shirleylin/',
+ 'mobileurl': 'https://m.flickr.com/photostream.gne?id=7102756',
+ 'photos_info': {'firstdatetaken': '2004-05-24 12:12:15',
+ 'firstdate': '1172556588',
+ 'count': 9665}}
+""" \ No newline at end of file
diff --git a/megapixels/commands/datasets/megaface_names.py b/megapixels/commands/datasets/megaface_names.py
new file mode 100644
index 00000000..01e93e2d
--- /dev/null
+++ b/megapixels/commands/datasets/megaface_names.py
@@ -0,0 +1,65 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out):
+ """Creates CSV of NSIDs from MegaFace"""
+
+ from tqdm import tqdm
+
+ # -------------------------------------------------
+ # process
+ fp_im_dirs = glob(join(opt_fp_in, '**/'), recursive=True)
+
+ log.info('Found {} directories'.format(len(fp_im_dirs)))
+
+ identities = {}
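+  # keyed by '<nsid>_<identity_num>' so image counts accumulate across duplicate dirs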
+
+ for fp_im_dir in tqdm(fp_im_dirs):
+ # 1234567@N05_identity_1
+ try:
+ dir_id_name = Path(fp_im_dir).name
+ nsid = dir_id_name.split('_')[0]
+ identity_num = dir_id_name.split('_')[2]
+ id_key = '{}_{}'.format(nsid, identity_num)
+ num_images = len(glob(join(fp_im_dir, '*.jpg')))
+      if id_key not in identities:
+ identities[id_key] = {'nsid': nsid, 'identity': identity_num, 'images': num_images}
+ else:
+ identities[id_key]['images'] += num_images
+    except Exception:
+      log.warn('could not parse directory: {}'.format(fp_im_dir))
+      continue
+
+  # flatten to a list of records for the DataFrame
+  identities_list = [v for k, v in identities.items()]
+  df = pd.DataFrame.from_dict(identities_list)
+
+  file_utils.mkdirs(Path(opt_fp_out).parent)
+
+  df.to_csv(opt_fp_out, index=False)
+  log.info('Wrote {} lines to {}'.format(len(df), opt_fp_out))
+
+
diff --git a/megapixels/commands/datasets/sha256.py b/megapixels/commands/datasets/sha256.py
new file mode 100644
index 00000000..c04fb504
--- /dev/null
+++ b/megapixels/commands/datasets/sha256.py
@@ -0,0 +1,90 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.option('-t', '--threads', 'opt_threads', default=4,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_force):
+  """Computes SHA256 hashes of images and writes them to a CSV"""
+
+  from glob import glob
+  from os.path import join
+  from pathlib import Path
+  import time
+  from multiprocessing.dummy import Pool as ThreadPool
+
+  import pandas as pd
+  from tqdm import tqdm
+
+  from app.utils import file_utils, im_utils
+
+
+ if not opt_force and Path(opt_fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ fp_ims = []
+ for ext in ['jpg', 'png']:
+ if opt_recursive:
+ fp_glob = join(opt_fp_in, '**/*.{}'.format(ext))
+ fp_ims += glob(fp_glob, recursive=True)
+ else:
+ fp_glob = join(opt_fp_in, '*.{}'.format(ext))
+ fp_ims += glob(fp_glob)
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing {:,} images'.format(len(fp_ims)))
+
+  def as_sha256(fp_im):
+    pbar.update(1)
+    return file_utils.sha256(fp_im)
+
+  # multithread pool; workers resolve pbar from the enclosing scope at call time
+  st = time.time()
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    sha256s = pool.map(as_sha256, fp_ims)
+  pool.close()
+  pool.join()
+
+  # collect per-file records for CSV export
+ data = []
+ for i, fp_im in enumerate(fp_ims):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(opt_fp_in))
+ sha256 = sha256s[i]
+ data.append( {
+ 'sha256': sha256,
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.','')
+ })
+
+ # save to CSV
+ df = pd.DataFrame.from_dict(data)
+ df.to_csv(opt_fp_out, index=False)
+
+ # timing
+  log.info('time: {:.2f}s, threads: {}'.format(time.time() - st, opt_threads))
\ No newline at end of file
diff --git a/megapixels/commands/datasets/ytmu.py b/megapixels/commands/datasets/ytmu.py
new file mode 100644
index 00000000..66680ed0
--- /dev/null
+++ b/megapixels/commands/datasets/ytmu.py
@@ -0,0 +1,205 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--videos', 'opt_dir_videos',
+              help='Videos directory')
+@click.option('--action', 'opt_action',
+              type=click.Choice(['info', 'rename', 'download', 'metadata', 'split_frames']),
+ default='info',
+ help='Command action')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_videos, opt_action):
+ """YTMU utils"""
+
+
+ from tqdm import tqdm
+
+ # -------------------------------------------------
+ # process
+
+ if opt_action == 'metadata':
+ # downloads video metadata with ytdl
+ handle_metadata(opt_fp_in, opt_fp_out)
+ elif opt_action == 'download':
+ # downloads video files with ytdl
+ handle_download(opt_fp_in, opt_fp_out)
+ elif opt_action == 'info':
+ # converts original data file to clean CSV
+    handle_info(opt_fp_in, opt_fp_out)
+ elif opt_action == 'rename':
+ # rename the videos to video ID
+ handle_rename(opt_fp_in, opt_fp_out, opt_dir_videos)
+ elif opt_action == 'split_frames':
+    # split videos into frames containing faces
+ handle_split_frames(opt_fp_in, opt_fp_out, opt_dir_videos)
+
+
+
+
+# ----------------------------------------------------
+# handlers
+
+def handle_split_frames(fp_in, dir_out, dir_videos):
+ if not dir_out or not dir_videos:
+ log.error('-o/--output and --videos required')
+ return
+ import cv2 as cv
+ from tqdm import tqdm
+ from app.processors import face_detector
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
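+  # save at most one face frame every face_interval frames to limit near-duplicates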
+ face_interval = 30
+ frame_interval_count = 0
+ frame_count = 0
+
+ file_utils.mkdirs(dir_out)
+
+ for fp_video in tqdm(fp_videos):
+ # log.debug('opening: {}'.format(fp_video))
+ video = cv.VideoCapture(fp_video)
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ # save frame
+ fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count)))
+ cv.imwrite(fp_frame, frame)
+        frame_interval_count = 0
+    video.release()
+
+
+def handle_metadata(fp_in, fp_out):
+
+ keys = ['description', 'average_rating', 'dislike_count', 'categories',
+ 'thumbnail', 'title', 'upload_date', 'uploader_url', 'uploader_id',
+ 'fps', 'height', 'width', 'like_count', 'license', 'tags']
+
+ import youtube_dl
+
+  ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'})
+
+ df = pd.read_csv(fp_in)
+ data_exp = []
+
+ for i, row in df.iterrows():
+ video_data = {'url': row['url'], 'id': row['id']}
+ try:
+ with ydl:
+ url = 'http://www.youtube.com/watch?v={}'.format(row['id'])
+ result = ydl.extract_info(url, download=False)
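+        # playlists return an 'entries' list; single videos return the info dict directly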
+ video = result['entries'][0] if 'entries' in result else result
+        for k in keys:
+          val = video.get(k)
+          if k == 'title':
+            log.debug(val)
+          if isinstance(val, list):
+            val = '; '.join(str(v) for v in val)
+          if isinstance(val, str):
+            val = val.replace(',', ';')  # keep CSV fields comma-free
+          video_data[k] = val
+ # log.debug('video_data: {}'.format(video_data))
+ except Exception as e:
+        log.warn('video unavailable: {}'.format(row['url']))
+ log.error(e)
+ continue
+ data_exp.append(video_data)
+
+ df_exp = pd.DataFrame.from_dict(data_exp)
+  df_exp.to_csv(fp_out, index=False)
+
+
+def handle_download(fp_in, dir_out):
+ import youtube_dl
+ df = pd.read_csv(fp_in)
+ fp_videos = glob(join(dir_out, '*.mp4'))
+ fp_videos += glob(join(dir_out, '*.webm'))
+ fp_videos += glob(join(dir_out, '*.mkv'))
+
+  ydl = youtube_dl.YoutubeDL({'outtmpl': join(dir_out, '%(id)s.%(ext)s')})
+
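+  # skip IDs that already have a downloaded file in the output directory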
+  for i, row in df.iterrows():
+    vid = str(row['id'])
+    found = False
+    for fp_video in fp_videos:
+      if vid in fp_video:
+        log.debug('skip: {}'.format(vid))
+        found = True
+        break
+    if not found:
+      try:
+        with ydl:
+          ydl.download(['http://www.youtube.com/watch?v={}'.format(vid)])
+      except Exception:
+        log.error('could not dl: {}'.format(vid))
+
+
+def handle_info(fp_in, fp_out):
+ if not fp_out:
+ log.error('--output required')
+ return
+ urls = file_utils.load_text(fp_in)
+ videos = []
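+  # the video ID is the value of the 'v=' query parameter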
+ for url in urls:
+ splits = url.split('v=')
+ try:
+ vid = splits[1]
+ vid = vid.split('&')[0]
+ videos.append({'url': url, 'id': vid})
+    except IndexError:
+ log.warn('no video id for {}'.format(url))
+ # convert to df
+ df = pd.DataFrame.from_dict(videos)
+  df.to_csv(fp_out, index=False)
+
+
+def handle_rename(fp_in, fp_out, dir_videos):
+ import shutil
+
+ if not dir_videos:
+ log.error('--videos required')
+ return
+
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+
+ df = pd.read_csv(fp_in)
+
+  for i, row in df.iterrows():
+    vid = str(row['id'])
+    # iterate a copy so matched files can be removed from fp_videos
+    for fp_video in fp_videos.copy():
+      if vid in fp_video:
+        dst = join(dir_videos, '{}{}'.format(vid, Path(fp_video).suffix))
+        shutil.move(fp_video, dst)
+        log.debug('move {} to {}'.format(fp_video, dst))
+        fp_videos.remove(fp_video)
+        break
\ No newline at end of file