 cli/app/commands/biggan/extract_dense_vectors.py | 2 +-
 cli/app/search/search_class.py                   | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/cli/app/commands/biggan/extract_dense_vectors.py b/cli/app/commands/biggan/extract_dense_vectors.py
index f7cffb2..3ca7313 100644
--- a/cli/app/commands/biggan/extract_dense_vectors.py
+++ b/cli/app/commands/biggan/extract_dense_vectors.py
@@ -24,7 +24,7 @@ from app.search.params import timestamp
help='Normalize labels every N steps')
@click.option('-feat', '--use_feature_detector', 'opt_use_feature_detector', is_flag=True,
help='Compute feature loss')
-@click.option('-ll', '--feature_layers', 'opt_feature_layers', default="1a,2a,3a,4a,7a",
+@click.option('-ll', '--feature_layers', 'opt_feature_layers', default="1a,3a,7a",
help='Feature layers used for loss')
@click.option('-snap', '--snapshot_interval', 'opt_snapshot_interval', default=20,
help='Interval to store sample images')
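The change above only trims the default feature-loss layers from five to three. A hypothetical sketch (the consuming code is not part of this diff) of how a comma-separated click option value like this is typically split into layer names:

    opt_feature_layers = "1a,3a,7a"  # new default
    feature_layers = [name.strip() for name in opt_feature_layers.split(",")]
    print(feature_layers)  # ['1a', '3a', '7a']

Fewer layers means fewer activations compared in the feature loss, so the new default should also reduce per-step cost.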
diff --git a/cli/app/search/search_class.py b/cli/app/search/search_class.py
index 921e586..ccb10cd 100644
--- a/cli/app/search/search_class.py
+++ b/cli/app/search/search_class.py
@@ -113,7 +113,7 @@ def find_nearest_vector(sess, generator, opt_fp_in, opt_dims, out_images, out_la
## normalize the Y encoding
# normalized_labels = tf.nn.l2_normalize(input_y)
# tf.reduce_mean(tf.abs(encoding - gen_encoding))
- normalized_labels = input_y / tf.reduce_sum(input_y)
+ normalized_labels = input_y / tf.reduce_max(input_y)
normalized_alpha = tf.compat.v1.placeholder(dtype=np.float32, shape=())
clip_labels = tf.assign(input_y, normalized_labels * (1 - normalized_alpha) + input_y * normalized_alpha)
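The normalization change in this hunk divides the label vector by its max rather than its sum. A minimal NumPy sketch (illustrative only, not from the repo) of the difference:

    import numpy as np

    y = np.array([0.2, 0.8, 0.4], dtype=np.float32)

    sum_norm = y / y.sum()   # old: entries sum to 1 (probability-like)
    max_norm = y / y.max()   # new: dominant class pinned at 1.0

    print(sum_norm)  # approx [0.143 0.571 0.286]
    print(max_norm)  # [0.25 1.   0.5 ]

Sum-normalization shrinks every entry as mass spreads across classes; max-normalization keeps the strongest class at full strength, which matters because clip_labels blends this result back into input_y.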
@@ -196,7 +196,8 @@ def find_nearest_vector(sess, generator, opt_fp_in, opt_dims, out_images, out_la
if opt_stochastic_clipping and (i % opt_clip_interval) == 0: # and i < opt_steps * 0.45:
sess.run(clip_latent, { clipped_alpha: (i / opt_steps) * 2 })
if opt_label_clipping and (i % opt_clip_interval) == 0: # and i < opt_steps * 0.75:
- sess.run(clip_labels, { normalized_alpha: (i / opt_steps) ** 2 })
+ # sess.run(clip_labels, { normalized_alpha: (i / opt_steps) ** 2 })
+ sess.run(clip_labels, { normalized_alpha: i / opt_steps })
if opt_video and opt_snapshot_interval != 0 and (i % opt_snapshot_interval) == 0:
phi_guess = sess.run(output)
guess_im = imgrid(imconvert_uint8(phi_guess), cols=1)
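The second hunk swaps the quadratic alpha schedule for a linear one. A small sketch (assuming opt_steps and the blend in clip_labels above) of how the two schedules differ over the run:

    opt_steps = 100
    for i in (10, 50, 90):
        quad = (i / opt_steps) ** 2  # old: 0.01, 0.25, 0.81
        lin = i / opt_steps          # new: 0.10, 0.50, 0.90
        print(f"step {i}: quadratic={quad:.2f}, linear={lin:.2f}")

Since clip_labels computes normalized_labels * (1 - alpha) + input_y * alpha, a larger alpha preserves more of the current labels; the linear schedule therefore relaxes the label re-normalization earlier in the optimization.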