Diffstat (limited to 'cli/app/search/search_dense.py')
-rw-r--r--  cli/app/search/search_dense.py  |  11
1 files changed, 5 insertions, 6 deletions
diff --git a/cli/app/search/search_dense.py b/cli/app/search/search_dense.py
index 5c07be1..dcef82f 100644
--- a/cli/app/search/search_dense.py
+++ b/cli/app/search/search_dense.py
@@ -212,20 +212,20 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
# Optimizer.
# --------------------------
if params.decay_lr:
- lrate = tf.train.exponential_decay(params.lr, inv_step,
- params.inv_it / params.decay_n, 0.1, staircase=True)
+ lrate = tf.train.exponential_decay(params.lr, inv_step, params.inv_it, 0.96)
+ # lrate = tf.train.exponential_decay(params.lr, inv_step, params.inv_it / params.decay_n, 0.1, staircase=True)
else:
lrate = tf.constant(params.lr)
- # trained_params = [label, latent, encoding]
- trained_params = [latent, encoding]
+ trained_params = [label, latent, encoding]
+ # trained_params = [latent, encoding]
optimizer = tf.train.AdamOptimizer(learning_rate=lrate, beta1=0.9, beta2=0.999)
inv_train_op = optimizer.minimize(inv_loss, var_list=trained_params,
global_step=inv_step)
reinit_optimizer = tf.variables_initializer(optimizer.variables())
- optimizer_quad = tf.train.AdamOptimizer(learning_rate=params.lr_quad, beta1=0.9, beta2=0.999)
+ optimizer_quad = tf.train.AdamOptimizer(learning_rate=lrate, beta1=0.9, beta2=0.999)
inv_train_op_quad = optimizer_quad.minimize(inv_loss_quad, var_list=trained_params, global_step=inv_step)
reinit_optimizer_quad = tf.variables_initializer(optimizer_quad.variables())
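
Note on the learning-rate change in the hunk above: tf.train.exponential_decay computes lr * decay_rate ** (global_step / decay_steps), flooring the exponent when staircase=True. The removed schedule therefore cut the rate by 10x every params.inv_it / params.decay_n steps, while the added one decays it smoothly by a factor of 0.96 per params.inv_it steps. A minimal pure-Python sketch of the two schedules, using illustrative values that are not taken from this repo:

    import math

    def exp_decay(lr, step, decay_steps, decay_rate, staircase=False):
        # Mirrors the formula used by tf.train.exponential_decay:
        # lr * decay_rate ** (step / decay_steps), with the exponent
        # floored when staircase=True.
        p = step / decay_steps
        if staircase:
            p = math.floor(p)
        return lr * decay_rate ** p

    lr, inv_it, decay_n = 0.1, 1000, 4                               # hypothetical values
    old = exp_decay(lr, 600, inv_it / decay_n, 0.1, staircase=True)  # removed schedule -> 0.001
    new = exp_decay(lr, 600, inv_it, 0.96)                           # added schedule   -> ~0.0976

The other change in this hunk points optimizer_quad at the same decayed lrate instead of the fixed params.lr_quad, so both optimizers now follow one schedule.
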
@@ -401,7 +401,6 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
sess.close()
def feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, img_a, img_b, y, x, height, width):
- print("{} {} {} {}".format(y, x, height, width))
if y is not None:
img_a = tf.image.crop_to_bounding_box(img_a, y, x, height, width)
img_b = tf.image.crop_to_bounding_box(img_b, y, x, height, width)
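
For context on the crop in feature_loss: tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width) slices a height x width window starting at pixel (y, x), so both images are compared over the same region whenever a bounding box is supplied. A standalone sketch with hypothetical shapes, not taken from this repo:

    import tensorflow as tf

    # Two hypothetical NHWC image batches.
    img_a = tf.zeros([1, 256, 256, 3])
    img_b = tf.zeros([1, 256, 256, 3])

    y, x, height, width = 64, 64, 128, 128  # hypothetical crop window
    if y is not None:
        # Crop both images to the same window before the feature comparison.
        img_a = tf.image.crop_to_bounding_box(img_a, y, x, height, width)
        img_b = tf.image.crop_to_bounding_box(img_b, y, x, height, width)
    # img_a and img_b now have shape [1, 128, 128, 3].
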