diff options
| author | Jules Laplace <julescarbon@gmail.com> | 2020-01-08 01:59:20 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2020-01-08 01:59:20 +0100 |
| commit | 2034d4c0cd241106900273980ee84f808a73d196 (patch) | |
| tree | e1a331d6fd0288561a5b4944eadcdcb25514ac2b | |
| parent | a194eaa66108d753aac1eac70b7016a9b20897e1 (diff) | |
up
| -rw-r--r-- | cli/app/commands/biggan/extract_dense_vectors.py | 7 | ||||
| -rw-r--r-- | cli/app/commands/biggan/search_class.py | 7 | ||||
| -rw-r--r-- | cli/app/search/search_class.py | 12 |
3 files changed, 16 insertions, 10 deletions
diff --git a/cli/app/commands/biggan/extract_dense_vectors.py b/cli/app/commands/biggan/extract_dense_vectors.py index 9f7644e..5320496 100644 --- a/cli/app/commands/biggan/extract_dense_vectors.py +++ b/cli/app/commands/biggan/extract_dense_vectors.py @@ -25,9 +25,11 @@ from app.search.json import params_dense_dict help='Feature layers used for loss') @click.option('-snap', '--snapshot_interval', 'opt_snapshot_interval', default=20, help='Interval to store sample images') +@click.option('-clip', '--clip_interval', 'opt_clip_interval', default=500, + help='Interval to clip vectors') @click.pass_context def cli(ctx, opt_folder_id, opt_latent_steps, opt_dense_steps, opt_video, - opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval): + opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval, opt_clip_interval): """ The full process: - Fetch new images from the cortex @@ -59,7 +61,8 @@ def cli(ctx, opt_folder_id, opt_latent_steps, opt_dense_steps, opt_video, opt_label_clipping=opt_label_clipping, opt_use_feature_detector=opt_use_feature_detector, opt_feature_layers=opt_feature_layers, - opt_snapshot_interval=opt_snapshot_interval + opt_snapshot_interval=opt_snapshot_interval, + opt_clip_interval=opt_clip_interval ) params = params_dense_dict(tag) diff --git a/cli/app/commands/biggan/search_class.py b/cli/app/commands/biggan/search_class.py index 6e1df95..050fbef 100644 --- a/cli/app/commands/biggan/search_class.py +++ b/cli/app/commands/biggan/search_class.py @@ -29,10 +29,11 @@ from app.utils.cortex_utils import cortex_folder, download_cortex_files, find_un help='Feature layers used for loss') @click.option('-snap', '--snapshot_interval', 'opt_snapshot_interval', default=20, help='Interval to store sample images') - +@click.option('-clip', '--clip_interval', 'opt_clip_interval', default=500, + help='Interval to clip vectors') @click.pass_context def 
cli(ctx, opt_folder_id, opt_fp_in, opt_dims, opt_steps, opt_limit, opt_video, opt_tag, - opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval): + opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval, opt_clip_interval): """ Search for an image (class vector) in BigGAN using gradient descent """ @@ -59,4 +60,4 @@ def cli(ctx, opt_folder_id, opt_fp_in, opt_dims, opt_steps, opt_limit, opt_video opt_feature_layers = opt_feature_layers.split(',') find_nearest_vector_for_images(paths, opt_dims, opt_steps, opt_video, opt_tag, opt_limit, - opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval) + opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval, opt_clip_interval) diff --git a/cli/app/search/search_class.py b/cli/app/search/search_class.py index cd53a71..134a139 100644 --- a/cli/app/search/search_class.py +++ b/cli/app/search/search_class.py @@ -46,7 +46,7 @@ feature_layer_names = { def find_nearest_vector_for_images(paths, opt_dims, opt_steps, opt_video, opt_tag, opt_limit=-1, opt_stochastic_clipping=0, opt_label_clipping=0, - opt_use_feature_detector=False, opt_feature_layers=[1,2,4,7], opt_snapshot_interval=20): + opt_use_feature_detector=False, opt_feature_layers=[1,2,4,7], opt_snapshot_interval=20, opt_clip_interval=500): tf.reset_default_graph() sess = tf.compat.v1.Session() print("Initializing generator...") @@ -66,13 +66,13 @@ def find_nearest_vector_for_images(paths, opt_dims, opt_steps, opt_video, opt_ta break out_fns[index] = os.path.basename(path) fp_frames = find_nearest_vector(sess, generator, path, opt_dims, out_images, out_labels, out_latent, opt_steps, index, - opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval) + opt_stochastic_clipping, opt_label_clipping, 
opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval, opt_clip_interval) if opt_video: export_video(fp_frames) sess.close() def find_nearest_vector(sess, generator, opt_fp_in, opt_dims, out_images, out_labels, out_latent, opt_steps, index, - opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval): + opt_stochastic_clipping, opt_label_clipping, opt_use_feature_detector, opt_feature_layers, opt_snapshot_interval, opt_clip_interval): """ Find the closest latent and class vectors for an image. Store the class vector in an HDF5. """ @@ -122,8 +122,10 @@ def find_nearest_vector(sess, generator, opt_fp_in, opt_dims, out_images, out_la feature_extractor = hub.Module("https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1") # Convert images from range [-1, 1] channels_first to [0, 1] channels_last. - gen_img_1 = tf.transpose(output / 2.0 + 0.5, [0, 2, 3, 1]) - target_img_1 = tf.transpose(target / 2.0 + 0.5, [0, 2, 3, 1]) + # gen_img_1 = tf.transpose(output / 2.0 + 0.5, [0, 2, 3, 1]) + # target_img_1 = tf.transpose(target / 2.0 + 0.5, [0, 2, 3, 1]) + gen_img_1 = output / 2.0 + 0.5 + target_img_1 = target / 2.0 + 0.5 # Convert images to appropriate size for feature extraction. height, width = hub.get_expected_image_size(feature_extractor) |
