summaryrefslogtreecommitdiff
path: root/cli/app/search
diff options
context:
space:
mode:
authorJules Laplace <julescarbon@gmail.com>2020-01-08 17:47:44 +0100
committerJules Laplace <julescarbon@gmail.com>2020-01-08 17:47:44 +0100
commit034922d32c1d9df996e6292a17fb4fb4cb04395d (patch)
tree0e2454b07c59e7e58c50073afa00db384635e41d /cli/app/search
parentf9a008b225e9c67b2ccabccb1eee0c261c61c26d (diff)
fix dense
Diffstat (limited to 'cli/app/search')
-rw-r--r--cli/app/search/search_dense.py64
1 file changed, 20 insertions(+), 44 deletions(-)
diff --git a/cli/app/search/search_dense.py b/cli/app/search/search_dense.py
index 46183c7..362e0ce 100644
--- a/cli/app/search/search_dense.py
+++ b/cli/app/search/search_dense.py
@@ -21,49 +21,25 @@ from app.settings import app_cfg
from app.utils.file_utils import write_pickle
from app.utils.cortex_utils import upload_bytes_to_cortex
-# --------------------------
-# Hyper-parameters.
-# --------------------------
-# Expected parameters:
-# generator_path: path to generator module.
-# generator_fixed_inputs: dictionary of fixed generator's input parameters.
-# dataset: name of the dataset (hdf5 file).
-# dataset_out: name for the output inverted dataset (hdf5 file).
-# General parameters:
-# batch_size: number of images inverted at the same time.
-# inv_it: number of iterations to invert an image.
-# inv_layer: 'latent' or name of the tensor of the custom layer to be inverted.
-# lr: learning rate.
-# decay_lr: exponential decay on the learning rate.
-# decay_n: number of exponential decays on the learning rate.
-# custom_grad_relu: replace relus with custom gradient.
-# Logging:
-# sample_size: number of images included in sampled images.
-# save_progress: whether to save intermediate images during optimization.
-# log_z_norm: log the norm of different sections of z.
-# log_activation_layer: log the percentage of active neurons in this layer.
-# Losses:
-# mse: use the mean squared error on pixels for image comparison.
-# features: use features extracted by a feature extractor for image comparison.
-# feature_extractor_path: path to feature extractor module.
-# feature_extractor_output: output name from feature extractor.
-# likeli_loss: regularization loss on the log likelihood of encodings.
-# norm_loss: regularization loss on the norm of encodings.
-# dist_loss: whether to include a loss on the dist between g1(z) and enc.
-# lambda_mse: coefficient for mse loss.
-# lambda_feat: coefficient for features loss.
-# lambda_reg: coefficient for regularization loss on latent.
-# lambda_dist: coefficient for l1 regularization on delta.
-# Latent:
-# clipping: whether to clip encoding values after every update.
-# stochastic_clipping: whether to consider stochastic clipping.
-# clip: clipping bound.
-# pretrained_latent: load pre trained fixed latent.
-# fixed_z: do not train the latent vector.
-# Initialization:
-# init_gen_dist: initialize encodings from the generated distribution.
-# init_lo: init min value.
-# init_hi: init max value.
+feature_layer_names = {
+ '1a': "InceptionV3/Conv2d_1a_3x3",
+ '2a': "InceptionV3/Conv2d_2a_3x3",
+ '2b': "InceptionV3/Conv2d_2b_3x3",
+ '3a': "InceptionV3/Conv2d_3a_3x3",
+ '3b': "InceptionV3/Conv2d_3b_3x3",
+ '4a': "InceptionV3/Conv2d_4a_3x3",
+ '5b': "InceptionV3/Mixed_5b",
+ '5c': "InceptionV3/Mixed_5c",
+ '5d': "InceptionV3/Mixed_5d",
+ '6a': "InceptionV3/Mixed_6a",
+ '6b': "InceptionV3/Mixed_6b",
+ '6c': "InceptionV3/Mixed_6c",
+ '6d': "InceptionV3/Mixed_6d",
+ '6e': "InceptionV3/Mixed_6e",
+ '7a': "InceptionV3/Mixed_7a",
+ '7b': "InceptionV3/Mixed_7b",
+ '7c': "InceptionV3/Mixed_7c",
+}
def find_dense_embedding_for_images(params):
# --------------------------
@@ -444,7 +420,7 @@ def find_dense_embedding_for_images(params):
for i in range(BATCH_SIZE):
out_i = out_pos + i
sample_fn, ext = os.path.splitext(sample_fns[out_i])
- image = Image.fromarray(image)
+ image = Image.fromarray(images[i])
fp = BytesIO()
image.save(fp, format='png')
data = upload_bytes_to_cortex(params.folder_id, sample_fn + "-inverse.png", fp, "image/png")