 cli/app/commands/biggan/extract_dense_vectors.py |  4 ++--
 cli/app/search/search_dense.py                   | 11 +++++------
 2 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/cli/app/commands/biggan/extract_dense_vectors.py b/cli/app/commands/biggan/extract_dense_vectors.py
index 05b2c23..018d646 100644
--- a/cli/app/commands/biggan/extract_dense_vectors.py
+++ b/cli/app/commands/biggan/extract_dense_vectors.py
@@ -10,7 +10,7 @@ from app.search.params import timestamp
 @click.command('')
 @click.option('-f', '--folder_id', 'opt_folder_id', type=int,
   help='Folder ID to process')
-@click.option('-ls', '--latent_steps', 'opt_latent_steps', default=100, type=int,
+@click.option('-ls', '--latent_steps', 'opt_latent_steps', default=200, type=int,
   help='Number of optimization iterations')
 @click.option('-ds', '--dense_steps', 'opt_dense_steps', default=2000, type=int,
   help='Number of optimization iterations')
@@ -24,7 +24,7 @@ from app.search.params import timestamp
   help='Normalize labels every N steps')
 @click.option('-feat', '--use_feature_detector', 'opt_use_feature_detector', is_flag=True,
   help='Compute feature loss')
-@click.option('-ll', '--feature_layers', 'opt_feature_layers', default="1a,2a,4a,7a",
+@click.option('-ll', '--feature_layers', 'opt_feature_layers', default="1a,2a,3a,4a,7a",
   help='Feature layers used for loss')
 @click.option('-snap', '--snapshot_interval', 'opt_snapshot_interval', default=20,
   help='Interval to store sample images')
diff --git a/cli/app/search/search_dense.py b/cli/app/search/search_dense.py
index 5c07be1..dcef82f 100644
--- a/cli/app/search/search_dense.py
+++ b/cli/app/search/search_dense.py
@@ -212,20 +212,20 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
 
     # --------------------------
     # Optimizer.
     # --------------------------
     if params.decay_lr:
-        lrate = tf.train.exponential_decay(params.lr, inv_step,
-            params.inv_it / params.decay_n, 0.1, staircase=True)
+        lrate = tf.train.exponential_decay(params.lr, inv_step, params.inv_it, 0.96)
+        # lrate = tf.train.exponential_decay(params.lr, inv_step, params.inv_it / params.decay_n, 0.1, staircase=True)
     else:
         lrate = tf.constant(params.lr)
 
-    # trained_params = [label, latent, encoding]
-    trained_params = [latent, encoding]
+    trained_params = [label, latent, encoding]
+    # trained_params = [latent, encoding]
 
     optimizer = tf.train.AdamOptimizer(learning_rate=lrate, beta1=0.9, beta2=0.999)
     inv_train_op = optimizer.minimize(inv_loss, var_list=trained_params, global_step=inv_step)
     reinit_optimizer = tf.variables_initializer(optimizer.variables())
 
-    optimizer_quad = tf.train.AdamOptimizer(learning_rate=params.lr_quad, beta1=0.9, beta2=0.999)
+    optimizer_quad = tf.train.AdamOptimizer(learning_rate=lrate, beta1=0.9, beta2=0.999)
     inv_train_op_quad = optimizer_quad.minimize(inv_loss_quad, var_list=trained_params, global_step=inv_step)
     reinit_optimizer_quad = tf.variables_initializer(optimizer_quad.variables())
@@ -401,7 +401,6 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
     sess.close()
 
 def feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, img_a, img_b, y, x, height, width):
-    print("{} {} {} {}".format(y, x, height, width))
     if y is not None:
         img_a = tf.image.crop_to_bounding_box(img_a, y, x, height, width)
         img_b = tf.image.crop_to_bounding_box(img_b, y, x, height, width)
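Note on the schedule change above: the removed call cut the learning rate by 10x at each of params.decay_n staircase boundaries, while the new call decays it smoothly by a factor of 0.96 over params.inv_it steps, a much gentler schedule. The shared decayed lrate now also drives optimizer_quad, which previously ran at the fixed params.lr_quad, so both loss phases follow one schedule. A minimal pure-Python sketch of the formula tf.train.exponential_decay evaluates (the params.* values below are illustrative stand-ins, not the project's defaults):

    # decayed = lr * decay_rate ** (step / decay_steps); staircase floors the exponent
    def exponential_decay(lr, step, decay_steps, decay_rate, staircase=False):
        exponent = step / decay_steps
        if staircase:
            exponent = exponent // 1  # piecewise-constant: rate drops at boundaries
        return lr * decay_rate ** exponent

    lr, inv_it, decay_n = 0.01, 2000, 4  # illustrative stand-ins for params.lr etc.
    for step in (0, 500, 1000, 2000):
        old = exponential_decay(lr, step, inv_it / decay_n, 0.1, staircase=True)  # removed schedule
        new = exponential_decay(lr, step, inv_it, 0.96)                           # new schedule
        print(step, round(old, 6), round(new, 6))

At step 2000 the old schedule has fallen to lr * 1e-4 while the new one is still at lr * 0.96.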

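The trained_params change re-enables joint optimization of the class label alongside the latent and dense encoding. Below is a hedged TF1 graph-mode sketch of how var_list scopes Adam to exactly those variables; the shapes and the toy loss are hypothetical placeholders, not what search_dense.py actually builds:

    import tensorflow as tf  # TF1 graph-mode API, as in the code above

    # Hypothetical shapes; the real variables are constructed elsewhere in the file.
    label    = tf.Variable(tf.zeros([1, 1000]), name="label")
    latent   = tf.Variable(tf.random_normal([1, 128]), name="latent")
    encoding = tf.Variable(tf.zeros([1, 120, 4]), name="encoding")
    inv_loss = (tf.reduce_sum(tf.square(label)) + tf.reduce_sum(tf.square(latent))
                + tf.reduce_sum(tf.square(encoding)))  # toy stand-in for the inversion loss

    inv_step  = tf.Variable(0, trainable=False)
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-2, beta1=0.9, beta2=0.999)
    # var_list restricts gradient updates to exactly these variables; adding label
    # back means the class vector is optimized together with latent and encoding.
    inv_train_op = optimizer.minimize(inv_loss, var_list=[label, latent, encoding],
                                      global_step=inv_step)
    # Running this op resets Adam's moment slots without touching the optimized
    # variables, matching the reinit_optimizer pattern in the diff.
    reinit_optimizer = tf.variables_initializer(optimizer.variables())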