| author | Jules Laplace <julescarbon@gmail.com> | 2020-02-10 19:13:34 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2020-02-10 19:13:34 +0100 |
| commit | 83eed8438f978f9b75268dda890134885b07ef6d (patch) | |
| tree | 908da467a2f768c45db78543cecad2a9dfdb1cfa /cli | |
| parent | 422410e391f4ca7b339c84df2432ca7873f420f5 (diff) | |
feature loss on quadrants
Diffstat (limited to 'cli')
| -rw-r--r-- | cli/app/commands/biggan/extract_dense_vectors.py | 2 |
| -rw-r--r-- | cli/app/search/search_dense.py | 14 |
2 files changed, 8 insertions, 8 deletions
```diff
diff --git a/cli/app/commands/biggan/extract_dense_vectors.py b/cli/app/commands/biggan/extract_dense_vectors.py
index 54f9762..0f61528 100644
--- a/cli/app/commands/biggan/extract_dense_vectors.py
+++ b/cli/app/commands/biggan/extract_dense_vectors.py
@@ -24,7 +24,7 @@ from app.search.params import timestamp
   help='Normalize labels every N steps')
 @click.option('-feat', '--use_feature_detector', 'opt_use_feature_detector', is_flag=True,
   help='Compute feature loss')
-@click.option('-ll', '--feature_layers', 'opt_feature_layers', default="1a,2a,3a,4a,7a",
+@click.option('-ll', '--feature_layers', 'opt_feature_layers', default="1a,2a,4a,7a",
   help='Feature layers used for loss')
 @click.option('-snap', '--snapshot_interval', 'opt_snapshot_interval', default=20,
   help='Interval to store sample images')
diff --git a/cli/app/search/search_dense.py b/cli/app/search/search_dense.py
index 1065edb..a296264 100644
--- a/cli/app/search/search_dense.py
+++ b/cli/app/search/search_dense.py
@@ -43,7 +43,7 @@ feature_layer_names = {
   '7c': "InceptionV3/Mixed_7c",
 }
 
-def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), opt_feature_layers=["1a,2a,3a,4a,7a"], opt_save_progress=True):
+def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), opt_feature_layers=["1a,2a,4a,7a"], opt_save_progress=True):
   # --------------------------
   # Global directories.
   # --------------------------
@@ -188,12 +188,12 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
   height, width = hub.get_expected_image_size(feature_extractor)
   img_w = IMG_SHAPE[0]
 
-  feat_loss, img_feat_err = feature_loss(feature_extractor, gen_img_ch, target_img_ch, None, None, height, width)
+  feat_loss, img_feat_err = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, None, None, height, width)
 
-  feat_loss_a, feat_err_a = feature_loss(feature_extractor, gen_img_ch, target_img_ch, 0, 0, height, width)
-  feat_loss_b, feat_err_b = feature_loss(feature_extractor, gen_img_ch, target_img_ch, img_w - width, 0, height, width)
-  feat_loss_c, feat_err_c = feature_loss(feature_extractor, gen_img_ch, target_img_ch, 0, img_w - width, height, width)
-  feat_loss_d, feat_err_d = feature_loss(feature_extractor, gen_img_ch, target_img_ch, img_w - width, img_w - width, height, width)
+  feat_loss_a, feat_err_a = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, 0, 0, height, width)
+  feat_loss_b, feat_err_b = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width, 0, height, width)
+  feat_loss_c, feat_err_c = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, 0, img_w - width, height, width)
+  feat_loss_d, feat_err_d = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, gen_img_ch, target_img_ch, img_w - width, img_w - width, height, width)
 
   feat_loss_quad = feat_loss_a + feat_loss_b + feat_loss_c + feat_loss_d
   img_feat_err_quad = feat_err_a + feat_err_b + feat_err_c + feat_err_d
@@ -400,7 +400,7 @@ def find_dense_embedding_for_images(params, opt_tag="inverse_" + timestamp(), op
   out_file.close()
   sess.close()
 
-def feature_loss(feature_extractor, img_a, img_b, y, x, height, width):
+def feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE, img_a, img_b, y, x, height, width):
   if y is not None:
     img_a = tf.image.crop_to_bounding_box(img_a, y, x, height, width)
     img_b = tf.image.crop_to_bounding_box(img_b, y, x, height, width)
```
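The change threads `opt_feature_layers` and `BATCH_SIZE` through `feature_loss()` and adds four corner-quadrant loss terms on top of the whole-image term. The sketch below shows how those call sites could fit together in a TF1 graph; only the crop logic and the corner offsets are taken from the patch. The feature comparison inside the function, the hub module URL, and the placeholder tensors (`gen_img_ch`, `target_img_ch`, and the `IMG_SHAPE` / `BATCH_SIZE` values) are illustrative assumptions about the surrounding code, not part of this commit.

```python
# Sketch only: the cropping and the quadrant offsets mirror the patch; the body of the
# feature comparison is assumed (the real feature_loss presumably selects intermediate
# activations via opt_feature_layers / feature_layer_names, which this patch does not show).
import tensorflow as tf
import tensorflow_hub as hub

def feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE,
                 img_a, img_b, y, x, height, width):
    if y is not None:
        # Quadrant term: compare only one height x width corner crop of each image.
        img_a = tf.image.crop_to_bounding_box(img_a, y, x, height, width)
        img_b = tf.image.crop_to_bounding_box(img_b, y, x, height, width)
    else:
        # Whole-image term: resize down to the extractor's expected input size.
        img_a = tf.image.resize_images(img_a, (height, width))
        img_b = tf.image.resize_images(img_b, (height, width))
    # Assumed comparison: mean squared difference of the module's default feature vectors.
    # opt_feature_layers is accepted to mirror the new signature but is unused here.
    feats_a = feature_extractor(img_a)
    feats_b = feature_extractor(img_b)
    per_image_err = tf.reduce_mean(tf.squared_difference(feats_a, feats_b), axis=-1)
    return tf.reduce_sum(per_image_err) / BATCH_SIZE, per_image_err

# Call sites as in the patch: one full-image term plus one term per corner quadrant.
feature_extractor = hub.Module(
    "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1")  # illustrative URL
height, width = hub.get_expected_image_size(feature_extractor)
BATCH_SIZE, IMG_SHAPE = 4, (512, 512, 3)  # illustrative values
img_w = IMG_SHAPE[0]
# Stand-ins for the generated / target image tensors used in search_dense.py.
gen_img_ch = tf.placeholder(tf.float32, (BATCH_SIZE, img_w, img_w, 3))
target_img_ch = tf.placeholder(tf.float32, (BATCH_SIZE, img_w, img_w, 3))

opt_feature_layers = "1a,2a,4a,7a".split(",")
feat_loss, _ = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE,
                            gen_img_ch, target_img_ch, None, None, height, width)
feat_loss_a, _ = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE,
                              gen_img_ch, target_img_ch, 0, 0, height, width)
feat_loss_b, _ = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE,
                              gen_img_ch, target_img_ch, img_w - width, 0, height, width)
feat_loss_c, _ = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE,
                              gen_img_ch, target_img_ch, 0, img_w - width, height, width)
feat_loss_d, _ = feature_loss(feature_extractor, opt_feature_layers, BATCH_SIZE,
                              gen_img_ch, target_img_ch, img_w - width, img_w - width, height, width)
feat_loss_quad = feat_loss_a + feat_loss_b + feat_loss_c + feat_loss_d
```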
