summaryrefslogtreecommitdiff
path: root/cli
diff options
context:
space:
mode:
Diffstat (limited to 'cli')
-rw-r--r--cli/app/search/search_dense.py32
1 file changed, 25 insertions, 7 deletions
diff --git a/cli/app/search/search_dense.py b/cli/app/search/search_dense.py
index 7e144c8..b2c77f4 100644
--- a/cli/app/search/search_dense.py
+++ b/cli/app/search/search_dense.py
@@ -239,13 +239,13 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
feature_loss = feature_loss_tfhub
height, width = hub.get_expected_image_size(feature_extractor)
- feat_loss_inception, img_feat_err = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, None, None, height, width)
+ feat_loss_inception_q, img_feat_err = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, None, None, height, width)
# feat_loss_a, feat_err_a = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, 0, 0, height, width)
- # feat_loss_b, feat_err_b = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width, 0, height, width)
- # feat_loss_c, feat_err_c = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, 0, img_w - width, height, width)
- # feat_loss_d, feat_err_d = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width, img_w - width, height, width)
- # feat_loss_e, feat_err_e = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, int((img_w - width) / 2), int((img_w - width) / 2), height, width)
+ feat_loss_inception_a, feat_err_inception_a = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width, 0, height, width)
+ feat_loss_inception_b, feat_err_inception_b = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, 0, img_w - width, height, width)
+ feat_loss_inception_c, feat_err_inception_c = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width, img_w - width, height, width)
+ feat_loss_inception_d, feat_err_inception_d = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, int((img_w - width) / 2), int((img_w - width) / 2), height, width)
################################################
# VGG feature extractor
@@ -261,8 +261,19 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
height = 224
width = 224
- feat_loss_vgg, img_feat_err_vgg = feature_loss_vgg(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, None, None, height, width)
+ feat_loss_vgg_q, img_feat_err_vgg = feature_loss_vgg(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, None, None, height, width)
+ feat_loss_vgg_a, feat_err_vgg_a = feature_loss_vgg(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, 16, 16, height, width)
+ feat_loss_vgg_b, feat_err_vgg_b = feature_loss_vgg(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width - 16, 16, height, width)
+ feat_loss_vgg_c, feat_err_vgg_c = feature_loss_vgg(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, 16, img_w - width - 16, height, width)
+ feat_loss_vgg_d, feat_err_vgg_d = feature_loss_vgg(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width - 16, img_w - width - 16, height, width)
+ # feat_loss_vgg_e, feat_err_vgg_e = feature_loss_vgg(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, int((img_w - width) / 2), int((img_w - width) / 2), height, width)
+
+ vgg_small_n = tf.placeholder(tf.float32, shape=(), name="vgg")
+ inception_small_n = tf.placeholder(tf.float32, shape=(), name="inception")
+
+ feat_loss_vgg = (feat_loss_vgg_a + feat_loss_vgg_b + feat_loss_vgg_c + feat_loss_vgg_d) * (vgg_small_n) / 4 + feat_loss_vgg_q * (1 - vgg_small_n)
+ feat_loss_inception = (feat_loss_inception_a + feat_loss_inception_b + feat_loss_inception_c + feat_loss_inception_d) * (inception_small_n) / 4 + feat_loss_inception_q * (1 - inception_small_n)
feat_loss = feat_loss_vgg + 10.0 * feat_loss_inception
# mse_loss_a = mse_loss_crop(target_img_ch, gen_img_ch, 0, 0, img_w / 2, img_w / 2)
@@ -441,7 +452,14 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
print("Beginning dense iteration...")
for it in range(params.inv_it):
- _inv_loss, _mse_loss, _feat_loss, _feat_loss_vgg, _feat_loss_inception, _lrate, _ = sess.run([inv_loss, mse_loss, feat_loss, feat_loss_vgg, feat_loss_inception, lrate, inv_train_op])
+ if it < params.inv_it * 0.7:
+ n = 0.0
+ elif it < params.inv_it * 0.75:
+ n = (it - params.inv_it * 0.70) / (params.inv_it * 0.05)
+ else:
+ n = 1.0
+
+ _inv_loss, _mse_loss, _feat_loss, _feat_loss_vgg, _feat_loss_inception, _lrate, _ = sess.run([inv_loss, mse_loss, feat_loss, feat_loss_vgg, feat_loss_inception, lrate, inv_train_op], feed_dict={ vgg_small_n: n, inception_small_n: n })
# if it < params.inv_it * 0.5:
# _inv_loss, _mse_loss, _feat_loss, _lrate, _ = sess.run([inv_loss, mse_loss, feat_loss, lrate, inv_train_op])
# elif it < params.inv_it * 0.75: