summaryrefslogtreecommitdiff
path: root/cli
diff options
context:
space:
mode:
Diffstat (limited to 'cli')
-rw-r--r--  cli/app/search/search_class.py  7
1 file changed, 4 insertions, 3 deletions
diff --git a/cli/app/search/search_class.py b/cli/app/search/search_class.py
index 105fc9f..3a56f68 100644
--- a/cli/app/search/search_class.py
+++ b/cli/app/search/search_class.py
@@ -110,7 +110,8 @@ def find_nearest_vector(sess, generator, opt_fp_in, opt_dims, out_images, out_la
# normalized_labels = tf.nn.l2_normalize(input_y)
# tf.reduce_mean(tf.abs(encoding - gen_encoding))
normalized_labels = input_y / tf.reduce_sum(input_y)
- clip_labels = tf.assign(input_y, normalized_labels)
+ normalized_alpha = tf.compat.v1.placeholder(dtype=np.float32, shape=())
+ clip_labels = tf.assign(input_y, normalized_labels * (1 - normalized_alpha) + input_y * normalized_alpha)
## if computing Feature loss, use these encoders
if opt_use_feature_detector:
@@ -189,9 +190,9 @@ def find_nearest_vector(sess, generator, opt_fp_in, opt_dims, out_images, out_la
print('iter: {}, loss: {}'.format(i, curr_loss))
if i > 0:
if opt_stochastic_clipping and (i % opt_clip_interval) == 0:
- sess.run(clip_latent)
+ sess.run(clip_latent, { normalized_alpha: i / opt_steps })
if opt_label_clipping and (i % opt_clip_interval) == 0:
- sess.run(clip_labels)
+ sess.run(clip_labels, { normalized_alpha: i / opt_steps })
if opt_video and opt_snapshot_interval != 0 and (i % opt_snapshot_interval) == 0:
phi_guess = sess.run(output)
guess_im = imgrid(imconvert_uint8(phi_guess), cols=1)