diff options
| author | Jules Laplace <julescarbon@gmail.com> | 2020-01-11 02:25:15 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2020-01-11 02:25:15 +0100 |
| commit | e4a32154c0f55f974c8fd210495898cb15027d8c (patch) | |
| tree | 1d6336a82315db023d2b9118adf976902d2b4d8a /cli/app/search | |
| parent | 179f98c949dbeeb383d152e46610dd6789616231 (diff) | |
dense search with args
Diffstat (limited to 'cli/app/search')
| -rw-r--r-- | cli/app/search/search_dense.py | 57 |
1 file changed, 34 insertions(+), 23 deletions(-)
diff --git a/cli/app/search/search_dense.py b/cli/app/search/search_dense.py index ac67d07..ac66a73 100644 --- a/cli/app/search/search_dense.py +++ b/cli/app/search/search_dense.py @@ -42,7 +42,7 @@ feature_layer_names = { '7c': "InceptionV3/Mixed_7c", } -def find_dense_embedding_for_images(params): +def find_dense_embedding_for_images(params, opt_tag="inverse", opt_feature_layers=["1a,2a,3a,4a,7a"], opt_save_progress=True): # -------------------------- # Global directories. # -------------------------- @@ -227,24 +227,35 @@ def find_dense_embedding_for_images(params): # # feat_loss += tf.reduce_mean(feat_square_diff) * 0.17 # # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.17 + feat_square_diff = tf.constant(0.0) + img_feat_err = tf.constant(0.0) + + for layer in opt_feature_layers: + layer_name = feature_layer_names[layer] + gen_feat = gen_feat_ex[layer_name] + target_feat = target_feat_ex[layer_name] + feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [batch_size, -1]) + feat_loss += tf.reduce_mean(feat_square_diff) / len(opt_feature_layers) + img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) / len(opt_feature_layers) + # conv1 1, conv1 2, conv3 2 and conv4 2 - gen_feat = gen_feat_ex["InceptionV3/Conv2d_1a_3x3"] - target_feat = target_feat_ex["InceptionV3/Conv2d_1a_3x3"] - feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) - feat_loss = tf.reduce_mean(feat_square_diff) * 0.25 - img_feat_err = tf.reduce_mean(feat_square_diff, axis=1) * 0.25 + # gen_feat = gen_feat_ex["InceptionV3/Conv2d_1a_3x3"] + # target_feat = target_feat_ex["InceptionV3/Conv2d_1a_3x3"] + # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) + # feat_loss = tf.reduce_mean(feat_square_diff) * 0.25 + # img_feat_err = tf.reduce_mean(feat_square_diff, axis=1) * 0.25 - gen_feat = gen_feat_ex["InceptionV3/Conv2d_2a_3x3"] - target_feat = target_feat_ex["InceptionV3/Conv2d_2a_3x3"] - feat_square_diff = 
tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) - feat_loss += tf.reduce_mean(feat_square_diff) * 0.25 - img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25 + # gen_feat = gen_feat_ex["InceptionV3/Conv2d_2a_3x3"] + # target_feat = target_feat_ex["InceptionV3/Conv2d_2a_3x3"] + # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) + # feat_loss += tf.reduce_mean(feat_square_diff) * 0.25 + # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25 - gen_feat = gen_feat_ex["InceptionV3/Conv2d_3b_1x1"] - target_feat = target_feat_ex["InceptionV3/Conv2d_3b_1x1"] - feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) - feat_loss += tf.reduce_mean(feat_square_diff) * 0.25 - img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25 + # gen_feat = gen_feat_ex["InceptionV3/Conv2d_3b_1x1"] + # target_feat = target_feat_ex["InceptionV3/Conv2d_3b_1x1"] + # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) + # feat_loss += tf.reduce_mean(feat_square_diff) * 0.25 + # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25 # gen_feat = gen_feat_ex["InceptionV3/Mixed_6a"] # target_feat = target_feat_ex["InceptionV3/Mixed_6a"] @@ -252,11 +263,11 @@ def find_dense_embedding_for_images(params): # feat_loss += tf.reduce_mean(feat_square_diff) * 0.25 # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25 - gen_feat = gen_feat_ex["InceptionV3/Mixed_7a"] - target_feat = target_feat_ex["InceptionV3/Mixed_7a"] - feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) - feat_loss += tf.reduce_mean(feat_square_diff) * 0.25 - img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25 + # gen_feat = gen_feat_ex["InceptionV3/Mixed_7a"] + # target_feat = target_feat_ex["InceptionV3/Mixed_7a"] + # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1]) + # feat_loss += 
tf.reduce_mean(feat_square_diff) * 0.25 + # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25 else: feat_loss = tf.constant(0.0) @@ -393,7 +404,7 @@ def find_dense_embedding_for_images(params): sys.stdout.flush() # Save target images and reconstructions. - if params.save_progress: + if opt_save_progress: assert SAMPLE_SIZE <= BATCH_SIZE gen_time = time.time() gen_images = sess.run(gen_img) @@ -423,7 +434,7 @@ def find_dense_embedding_for_images(params): image = Image.fromarray(images[i]) fp = BytesIO() image.save(fp, format='png') - data = upload_bytes_to_cortex(params.folder_id, sample_fn + "-inverse.png", fp, "image/png") + data = upload_bytes_to_cortex(params.folder_id, "{}-{}.png".format(sample_fn, tag), fp, "image/png") print(json.dumps(data, indent=2)) if data is not None and 'files' in data: file_id = data['files'][0]['id'] |
