Diffstat (limited to 'megapixels/commands/datasets')
-rw-r--r--  megapixels/commands/datasets/add_uuid.py         44
-rw-r--r--  megapixels/commands/datasets/filter_by_pose.py  101
-rw-r--r--  megapixels/commands/datasets/filter_poses.py     76
-rw-r--r--  megapixels/commands/datasets/gen_filepath.py (renamed from megapixels/commands/datasets/file_meta.py)  50
-rw-r--r--  megapixels/commands/datasets/gen_sha256.py      152
-rw-r--r--  megapixels/commands/datasets/gen_uuid.py         65
-rw-r--r--  megapixels/commands/datasets/lookup.py           26
-rw-r--r--  megapixels/commands/datasets/sha256.py           89
8 files changed, 363 insertions(+), 240 deletions(-)
diff --git a/megapixels/commands/datasets/add_uuid.py b/megapixels/commands/datasets/add_uuid.py
deleted file mode 100644
index 9c14c0e3..00000000
--- a/megapixels/commands/datasets/add_uuid.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out',
- help='Output directory')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_force):
- """Appends UUID to records CSV"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import base64
- import uuid
-
- from tqdm import tqdm
- import pandas as pd
-
- if not opt_force and Path(opt_fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwrite')
- return
-
- # load names
- df_records = pd.read_csv(opt_fp_in)
- records = df_records.to_dict('index')
- # append a UUID to every entry
- for idx, item in records.items():
- records[idx]['uuid'] = uuid.uuid4()
- # save to csv
- df_uuid = pd.DataFrame.from_dict(list(records.values())) # ignore the indices
- df_uuid.to_csv(opt_fp_out, index=False)
-
-  log.info('done')
\ No newline at end of file
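Note: the deleted command above round-trips through a dict (to_dict, mutate, from_dict) just to add one column. A minimal equivalent sketch; the CSV paths are illustrative, not part of the repo:

    import uuid

    import pandas as pd

    df_records = pd.read_csv('records.csv')
    # one fresh UUID4 per row (a comprehension; [uuid.uuid4()] * n would
    # repeat a single UUID n times)
    df_records['uuid'] = [uuid.uuid4() for _ in range(len(df_records))]
    df_records.to_csv('records_uuid.csv', index=False)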
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
new file mode 100644
index 00000000..6fdbef98
--- /dev/null
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -0,0 +1,101 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--yaw', 'opt_yaw', type=(float, float), default=cfg.POSE_MINMAX_YAW,
+ help='Yaw (min, max)')
+@click.option('--roll', 'opt_roll', type=(float, float), default=cfg.POSE_MINMAX_ROLL,
+ help='Roll (min, max)')
+@click.option('--pitch', 'opt_pitch', type=(float, float), default=cfg.POSE_MINMAX_PITCH,
+ help='Pitch (min, max)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_roll, opt_pitch):
+ """Filter out exaggerated poses"""
+
+ import sys
+ from os.path import join
+ from pathlib import Path
+ import shutil
+ from datetime import datetime
+
+ import pandas as pd
+ from tqdm import tqdm
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils
+
+  # create data store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # load pose
+ fp_pose = data_store.metadata(types.Metadata.FACE_POSE)
+ df_pose = pd.read_csv(fp_pose).set_index('index')
+ # load roi
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # load filepath
+ fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
+ df_filepath = pd.read_csv(fp_filepath).set_index('index')
+ # load uuid
+  fp_uuid = data_store.metadata(types.Metadata.UUID)
+ df_uuid = pd.read_csv(fp_uuid).set_index('index')
+ # load sha256 index
+ fp_sha256 = data_store.metadata(types.Metadata.SHA256)
+ df_sha256 = pd.read_csv(fp_sha256).set_index('index')
+ # debug
+ log.info('Processing {:,} rows'.format(len(df_pose)))
+ n_rows = len(df_pose)
+
+ # filter out extreme poses
+ invalid_indices = []
+ for ds_pose in tqdm(df_pose.itertuples(), total=len(df_pose)):
+    if ds_pose.yaw < opt_yaw[0] or ds_pose.yaw > opt_yaw[1] \
+        or ds_pose.roll < opt_roll[0] or ds_pose.roll > opt_roll[1] \
+        or ds_pose.pitch < opt_pitch[0] or ds_pose.pitch > opt_pitch[1]:
+      invalid_indices.append(ds_pose.Index)  # unique file indexes
+
+  # drop invalid rows from all metadata frames
+  log.info(f'First invalid indices: {invalid_indices[:20]}')
+  log.info(f'Removing {len(invalid_indices)} invalid indices...')
+  df_filepath = df_filepath.drop(invalid_indices)
+  df_sha256 = df_sha256.drop(invalid_indices)
+  df_uuid = df_uuid.drop(invalid_indices)
+  df_roi = df_roi.drop(invalid_indices)
+  df_pose = df_pose.drop(invalid_indices)
+  log.info(f'Removed {n_rows - len(df_pose)} rows')
+
+  # create a timestamped backup directory
+  dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%H%M%S}')
+ file_utils.mkdirs(dir_bkup)
+ # move files to backup
+ shutil.move(fp_filepath, join(dir_bkup, Path(fp_filepath).name))
+ shutil.move(fp_sha256, join(dir_bkup, Path(fp_sha256).name))
+ shutil.move(fp_uuid, join(dir_bkup, Path(fp_uuid).name))
+ shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
+ shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
+ # save filtered poses
+ df_filepath.to_csv(fp_filepath)
+ df_sha256.to_csv(fp_sha256)
+ df_uuid.to_csv(fp_uuid)
+ df_roi.to_csv(fp_roi)
+ df_pose.to_csv(fp_pose)
+
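Note: filter_by_pose.py marks a row invalid when any of yaw, roll, or pitch falls outside its (min, max) window. A standalone sketch of that predicate; the helper name is illustrative, and the default windows are the ones from the deleted filter_poses.py:

    def is_extreme_pose(yaw, roll, pitch,
                        yaw_minmax=(-25, 25), roll_minmax=(-15, 15),
                        pitch_minmax=(-10, 10)):
      # True if any pose angle falls outside its (min, max) window
      for angle, (vmin, vmax) in ((yaw, yaw_minmax), (roll, roll_minmax),
                                  (pitch, pitch_minmax)):
        if angle < vmin or angle > vmax:
          return True
      return False

    assert is_extreme_pose(40.0, 0.0, 0.0)      # yaw out of range
    assert not is_extreme_pose(5.0, -3.0, 2.0)  # all angles within range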
diff --git a/megapixels/commands/datasets/filter_poses.py b/megapixels/commands/datasets/filter_poses.py
deleted file mode 100644
index 304eeff2..00000000
--- a/megapixels/commands/datasets/filter_poses.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('--yaw', 'opt_yaw', type=(float, float), default=(-25,25),
- help='Yaw (min, max)')
-@click.option('--roll', 'opt_roll', type=(float, float), default=(-15,15),
- help='Roll (min, max)')
-@click.option('--pitch', 'opt_pitch', type=(float, float), default=(-10,10),
- help='Pitch (min, max)')
-@click.option('--drop', 'opt_drop', type=click.Choice(['valid', 'invalid']), default='invalid',
- help='Drop valid or invalid poses')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_yaw, opt_roll, opt_pitch,
- opt_drop, opt_force):
- """Filter out exaggerated poses"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.utils import file_utils, im_utils
-
-
- if not opt_force and Path(opt_fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwrite')
- return
-
- df_poses = pd.read_csv(opt_fp_in).set_index('index')
-
- if opt_slice:
- df_poses = df_poses[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} rows'.format(len(df_poses)))
-
- # extend a new temporary column
- df_poses['valid'] = [0] * len(df_poses)
-
- # filter out extreme poses
- for ds_pose in tqdm(df_poses.itertuples(), total=len(df_poses)):
- if ds_pose.yaw > opt_yaw[0] and ds_pose.yaw < opt_yaw[1] \
- and ds_pose.roll > opt_roll[0] and ds_pose.roll < opt_roll[1] \
- and ds_pose.pitch > opt_pitch[0] and ds_pose.pitch < opt_pitch[1]:
- df_poses.at[ds_pose.Index, 'valid'] = 1
-
- # filter out valid/invalid
- drop_val = 0 if opt_drop == 'valid' else 0 # drop 0's if drop == valid, else drop 1's
- df_poses_filtered = df_poses.drop(df_poses[df_poses.valid == int()].index, axis=0)
-
- # drop temp column
- df_poses_filtered = df_poses_filtered.drop('valid', axis=1)
-
- # save filtered poses
- df_poses_filtered.to_csv(opt_fp_out)
-  log.info('Saved {:,} rows'.format(len(df_poses_filtered)))
\ No newline at end of file
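Note: the temporary valid column above can also be written as a single boolean mask; a sketch with inclusive (min, max) windows over a toy frame:

    import pandas as pd

    df_poses = pd.DataFrame({'yaw': [2.0, 40.0], 'roll': [1.0, 0.0], 'pitch': [-4.0, 1.0]})
    opt_yaw, opt_roll, opt_pitch = (-25, 25), (-15, 15), (-10, 10)

    # keep rows whose angles all fall inside their windows
    mask = (df_poses['yaw'].between(*opt_yaw)
            & df_poses['roll'].between(*opt_roll)
            & df_poses['pitch'].between(*opt_pitch))
    df_valid = df_poses[mask]     # row 0
    df_invalid = df_poses[~mask]  # row 1 (yaw 40 is out of range)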
diff --git a/megapixels/commands/datasets/file_meta.py b/megapixels/commands/datasets/gen_filepath.py
index e1456f44..e06fee6b 100644
--- a/megapixels/commands/datasets/file_meta.py
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -12,10 +12,20 @@ from app.utils.logger_utils import Logger
log = Logger.getLogger()
@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output file for file meta CSV')
+@click.option('-i', '--input', 'opt_fp_in',
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
@@ -25,7 +35,8 @@ log = Logger.getLogger()
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_force):
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
+ opt_recursive, opt_threads, opt_force):
"""Multithreading test"""
from glob import glob
@@ -39,21 +50,26 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
from tqdm import tqdm
from glob import glob
+ from app.models import DataStore
from app.utils import file_utils, im_utils
-
- if not opt_force and Path(opt_fp_out).exists():
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = opt_fp_out if opt_fp_out is not None else data_store.metadata(types.Metadata.FILEPATH)
+ if not opt_force and Path(fp_out).exists():
    log.error('File exists. Use "-f / --force" to overwrite')
return
+
+ # glob files
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
fp_ims = []
- log.info(f'Globbing {opt_fp_in}')
+ log.info(f'Globbing {fp_in}')
for ext in ['jpg', 'png']:
if opt_recursive:
- fp_glob = join(opt_fp_in, '**/*.{}'.format(ext))
+ fp_glob = join(fp_in, '**/*.{}'.format(ext))
fp_ims += glob(fp_glob, recursive=True)
else:
- fp_glob = join(opt_fp_in, '*.{}'.format(ext))
+ fp_glob = join(fp_in, '*.{}'.format(ext))
fp_ims += glob(fp_glob)
if not fp_ims:
@@ -63,14 +79,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
if opt_slice:
fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
- log.info('Processing {:,} images'.format(len(fp_ims)))
+ log.info('Found {:,} images'.format(len(fp_ims)))
# convert data to dict
data = []
for i, fp_im in enumerate(tqdm(fp_ims)):
fpp_im = Path(fp_im)
- subdir = str(fpp_im.parent.relative_to(opt_fp_in))
+ subdir = str(fpp_im.parent.relative_to(fp_in))
data.append( {
'subdir': subdir,
'fn': fpp_im.stem,
@@ -78,7 +94,9 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
})
# save to CSV
- file_utils.mkdirs(opt_fp_out)
- df = pd.DataFrame.from_dict(data)
- df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ file_utils.mkdirs(fp_out)
+ df_filepath = pd.DataFrame.from_dict(data)
+ df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True)
+ df_filepath = df_filepath.reset_index(drop=True)
+ df_filepath.index.name = 'index'
+  df_filepath.to_csv(fp_out)
\ No newline at end of file
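Note: after the rename, the command resolves its image directory and output CSV from a DataStore and writes a subdir/fn/ext index sorted by subdir. A condensed, self-contained sketch of that indexing logic; build_filepath_index is illustrative, not part of the repo:

    from glob import glob
    from os.path import join
    from pathlib import Path

    import pandas as pd

    def build_filepath_index(dir_images, recursive=True, exts=('jpg', 'png')):
      # glob image files, then index them as (subdir, fn, ext) rows
      fp_ims = []
      for ext in exts:
        pattern = f'**/*.{ext}' if recursive else f'*.{ext}'
        fp_ims += glob(join(dir_images, pattern), recursive=recursive)
      rows = [{'subdir': str(Path(fp).parent.relative_to(dir_images)),
               'fn': Path(fp).stem,
               'ext': Path(fp).suffix.lstrip('.')} for fp in fp_ims]
      df = pd.DataFrame(rows, columns=['subdir', 'fn', 'ext'])
      df = df.sort_values('subdir').reset_index(drop=True)
      df.index.name = 'index'
      return df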
diff --git a/megapixels/commands/datasets/gen_sha256.py b/megapixels/commands/datasets/gen_sha256.py
new file mode 100644
index 00000000..1616eebf
--- /dev/null
+++ b/megapixels/commands/datasets/gen_sha256.py
@@ -0,0 +1,152 @@
+'''
+Generates a sha256 + identity index CSV for a dataset
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default='subdir_tail', type=click.Choice(identity_sources),
+  help='Identity source used to group files into identities')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+ opt_identity, opt_force):
+ """Generates sha256/identity index CSV file"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models import DataStore
+ from app.utils import file_utils, im_utils
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.SHA256) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+ # get filepath in
+ fp_in = data_store.metadata(types.Metadata.FILEPATH)
+ df_files = pd.read_csv(fp_in).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_files = df_files[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing {:,} images'.format(len(df_files)))
+
+
+ # prepare list of images to multithread into sha256s
+ dir_media = data_store.media_images_original() if opt_dir_media is None else opt_dir_media
+ file_objs = []
+ for ds_file in df_files.itertuples():
+ fp_im = join(dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
+    # optionally tag each file with an identity subdir; rows are later
+    # sorted and enumerated by unique subdir to assign identity_index
+ file_obj = {'fp': fp_im, 'index': ds_file.Index}
+ if opt_identity:
+      subdirs = ds_file.subdir.split('/')
+      if not subdirs or not subdirs[0]:
+        log.error(f'Could not split subdir: "{ds_file.subdir}". Try a different option for "--identity"')
+        log.error('exiting')
+        return
+      if opt_identity == 'subdir':
+        # use the full subdir path
+        subdir = ds_file.subdir
+      elif opt_identity == 'subdir_head':
+        # use first part of subdir path
+        subdir = subdirs[0]
+      elif opt_identity == 'subdir_tail':
+        # use last part of subdir path
+        subdir = subdirs[-1]
+      file_obj['identity_subdir'] = subdir
+ file_objs.append(file_obj)
+
+  # shared progress bar, updated by the worker threads
+  pbar = tqdm(total=len(file_objs))
+
+  def as_sha256(file_obj):
+    file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
+    pbar.update(1)
+    return file_obj
+
+  # multithread pool
+  st = time.time()
+  pool = ThreadPool(opt_threads)
+  pool_file_objs = pool.map(as_sha256, file_objs)
+  pbar.close()
+
+ # convert data to dict
+ data = []
+ for pool_file_obj in pool_file_objs:
+ data.append( {
+ 'sha256': pool_file_obj['sha256'],
+ 'index': pool_file_obj['index'],
+ 'identity_subdir': pool_file_obj.get('identity_subdir', ''),
+ })
+
+ # sort based on identity_subdir
+ # save to CSV
+ df_sha256 = pd.DataFrame.from_dict(data)
+  # add identity column (placeholder values, filled in below)
+  df_sha256['identity_index'] = [0] * len(df_sha256)
+ df_sha256 = df_sha256.sort_values(by=['identity_subdir'], ascending=True)
+ df_sha256_identity_groups = df_sha256.groupby('identity_subdir')
+ for identity_index, df_sha256_identity_group_tuple in enumerate(df_sha256_identity_groups):
+ identity_subdir, df_sha256_identity_group = df_sha256_identity_group_tuple
+ for ds_sha256 in df_sha256_identity_group.itertuples():
+ df_sha256.at[ds_sha256.Index, 'identity_index'] = identity_index
+ # drop temp identity subdir column
+ df_sha256 = df_sha256.drop('identity_subdir', axis=1)
+ # write to CSV
+ log.info(f'rows: {len(df_sha256)}')
+ file_utils.mkdirs(fp_out)
+  df_sha256 = df_sha256.sort_values(['index'], ascending=[True])
+ df_sha256.to_csv(fp_out, index=False)
+
+ # timing
+ log.info(f'wrote file: {fp_out}')
+  log.info('time: {:.2f}, threads: {}'.format(time.time() - st, opt_threads))
\ No newline at end of file
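Note: the groupby loop above enumerates unique identity_subdir values in sorted order. pandas can produce the same mapping directly; a sketch over a toy frame (the values are illustrative):

    import pandas as pd

    df = pd.DataFrame({
      'index': [0, 1, 2, 3],
      'sha256': ['aa11', 'bb22', 'cc33', 'dd44'],
      'identity_subdir': ['n02', 'n01', 'n02', 'n03'],
    })

    # factorize assigns codes 0..k-1 in order of first appearance,
    # so sort by subdir first to keep identity indices in sorted order
    df = df.sort_values('identity_subdir')
    df['identity_index'] = pd.factorize(df['identity_subdir'])[0]  # [0, 1, 1, 2]
    df = df.drop('identity_subdir', axis=1).sort_values('index')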
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
new file mode 100644
index 00000000..612c43ee
--- /dev/null
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -0,0 +1,65 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
+ """Appends UUID to records CSV"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import base64
+ import uuid
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.models import DataStore
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.UUID) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # load sha256 records
+ fp_in = data_store.metadata(types.Metadata.SHA256) if opt_fp_in is None else opt_fp_in
+ log.info(f'Loading: {fp_in}')
+ df_records = pd.read_csv(fp_in).set_index('index')
+
+  df_uuids = df_records.copy()
+  # one fresh UUID4 per row; note [uuid.uuid4()] * n would repeat a single UUID
+  df_uuids['uuid'] = [uuid.uuid4() for _ in tqdm(range(len(df_uuids)))]
+
+ df_uuids = df_uuids.drop(['sha256', 'identity_index'], axis=1)
+  df_uuids.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
index e84bdf3e..5a2a171e 100644
--- a/megapixels/commands/datasets/lookup.py
+++ b/megapixels/commands/datasets/lookup.py
@@ -6,8 +6,10 @@ from app.utils import click_utils
from app.settings import app_cfg as cfg
from app.utils.logger_utils import Logger
+log = Logger.getLogger()
+
@click.command()
-@click.option('--index', 'opt_index', type=int,
+@click.option('--index', 'opt_index', type=int, required=True,
help='Vector index to lookup')
@click.option('--data_store', 'opt_data_store',
type=cfg.DataStoreVar,
@@ -19,12 +21,8 @@ from app.utils.logger_utils import Logger
required=True,
show_default=True,
help=click_utils.show_help(types.Dataset))
-@click.option('--metadata', 'opt_metadata_type', required=True,
- type=cfg.MetadataVar,
- show_default=True,
- help=click_utils.show_help(types.Metadata))
@click.pass_context
-def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
"""Display image info"""
import sys
@@ -37,22 +35,20 @@ def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
import cv2 as cv
from tqdm import tqdm
- from app.utils import file_utils, im_utils, path_utils
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
log = Logger.getLogger()
-
- log.info(f'creating dataset: {opt_dataset}')
- dataset = Dataset(opt_dataset)
- # loads all CSV files, may take a while
- log.info(f'loading dataset...')
- dataset.load(opt_data_store)
+ # init dataset
+ dataset = Dataset(opt_data_store, opt_dataset)
+ # set data store and load files
+ dataset.load()
# find image records
image_record = dataset.roi_idx_to_record(opt_index)
# debug
image_record.summarize()
# load image
- fp_im = image_record.filepath
- im = cv.imread(fp_im)
+ im = cv.imread(image_record.filepath)
# display
cv.imshow('', im)
# cv gui
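Note: the hunk ends at the display step; presumably the cv gui block below it is the usual imshow/waitKey idiom, roughly as follows (the image path is illustrative):

    import cv2 as cv

    im = cv.imread('face.jpg')
    cv.imshow('', im)
    # block until a key is pressed, then close the window
    cv.waitKey(0)
    cv.destroyAllWindows()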
diff --git a/megapixels/commands/datasets/sha256.py b/megapixels/commands/datasets/sha256.py
deleted file mode 100644
index 4c734073..00000000
--- a/megapixels/commands/datasets/sha256.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-t', '--threads', 'opt_threads', default=4,
- help='Number of threads')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_slice, opt_threads, opt_force):
- """Multithreading test"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.utils import file_utils, im_utils
-
-
- if not opt_force and Path(opt_fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwrite')
- return
-
- df_files = pd.read_csv(opt_fp_in).set_index('index')
-
- if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} images'.format(len(df_files)))
-
-
- # prepare list of images to multithread into sha256s
- file_objs = []
- for ds_file in df_files.itertuples():
- fp_im = join(opt_dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
- file_objs.append({'fp': fp_im, 'index': ds_file.Index})
-
- # convert to thread pool
- pbar = tqdm(total=len(file_objs))
-
- def as_sha256(file_obj):
- pbar.update(1)
- file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
- return file_obj
-
- # multithread pool
- pool_file_objs = []
- st = time.time()
- pool = ThreadPool(opt_threads)
- with tqdm(total=len(file_objs)) as pbar:
- pool_file_objs = pool.map(as_sha256, file_objs)
- pbar.close()
-
- # convert data to dict
- data = []
- for pool_file_obj in pool_file_objs:
- data.append( {
- 'sha256': pool_file_obj['sha256'],
- 'index': pool_file_obj['index']
- })
-
- # save to CSV
- file_utils.mkdirs(opt_fp_out)
- df = pd.DataFrame.from_dict(data)
- df.to_csv(opt_fp_out, index=False)
-
- # timing
-  log.info('time: {:.2f}, threads: {}'.format(time.time() - st, opt_threads))
\ No newline at end of file
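Note: both the old and new sha256 commands delegate hashing to file_utils.sha256, which is not shown in this diff; presumably it is a chunked file digest along these lines (a sketch, assuming the helper hashes file contents rather than the path string):

    import hashlib

    def sha256(fp, chunk_size=65536):
      # hex digest of a file's contents, read in chunks to bound memory
      h = hashlib.sha256()
      with open(fp, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
          h.update(chunk)
      return h.hexdigest()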