summaryrefslogtreecommitdiff
path: root/inversion
diff options
context:
space:
mode:
authorJules Laplace <julescarbon@gmail.com>2019-12-15 01:33:05 +0100
committerJules Laplace <julescarbon@gmail.com>2019-12-15 01:33:05 +0100
commitfcf800545af9297fe9f1f70fb318f78ee6bdadfa (patch)
treedab7fece97def1597814522dc522c01177f68a51 /inversion
parentde724b765de257e07e0ed2547d4990a20957d2bc (diff)
testing lower layers
Diffstat (limited to 'inversion')
-rw-r--r--inversion/image_inversion_inception.py66
1 file changed, 47 insertions, 19 deletions
diff --git a/inversion/image_inversion_inception.py b/inversion/image_inversion_inception.py
index f9d1c79..ae4793f 100644
--- a/inversion/image_inversion_inception.py
+++ b/inversion/image_inversion_inception.py
@@ -220,23 +220,23 @@ if params.features:
gen_feat_ex = feature_extractor(dict(images=gen_img_1), as_dict=True, signature='image_feature_vector')
target_feat_ex = feature_extractor(dict(images=target_img_1), as_dict=True, signature='image_feature_vector')
- gen_feat = gen_feat_ex["InceptionV3/Mixed_7a"]
- target_feat = target_feat_ex["InceptionV3/Mixed_7a"]
- feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
- feat_loss = tf.reduce_mean(feat_square_diff) * 0.5
- img_feat_err = tf.reduce_mean(feat_square_diff, axis=1) * 0.5
+ # gen_feat = gen_feat_ex["InceptionV3/Mixed_7a"]
+ # target_feat = target_feat_ex["InceptionV3/Mixed_7a"]
+ # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
+ # feat_loss = tf.reduce_mean(feat_square_diff) * 0.5
+ # img_feat_err = tf.reduce_mean(feat_square_diff, axis=1) * 0.5
- gen_feat = gen_feat_ex["InceptionV3/Mixed_6b"]
- target_feat = target_feat_ex["InceptionV3/Mixed_6b"]
- feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
- feat_loss += tf.reduce_mean(feat_square_diff) * 0.16
- img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.16
+ # # gen_feat = gen_feat_ex["InceptionV3/Mixed_6b"]
+ # # target_feat = target_feat_ex["InceptionV3/Mixed_6b"]
+ # # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
+ # # feat_loss += tf.reduce_mean(feat_square_diff) * 0.16
+ # # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.16
- gen_feat = gen_feat_ex["InceptionV3/Mixed_5a"]
- target_feat = target_feat_ex["InceptionV3/Mixed_5a"]
- feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
- feat_loss += tf.reduce_mean(feat_square_diff) * 0.16
- img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.16
+ # # gen_feat = gen_feat_ex["InceptionV3/Mixed_5a"]
+ # # target_feat = target_feat_ex["InceptionV3/Mixed_5a"]
+ # # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
+ # # feat_loss += tf.reduce_mean(feat_square_diff) * 0.16
+ # # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.16
# gen_feat = gen_feat_ex["InceptionV3/Mixed_7b"]
# target_feat = target_feat_ex["InceptionV3/Mixed_7b"]
@@ -244,11 +244,36 @@ if params.features:
# feat_loss += tf.reduce_mean(feat_square_diff) * 0.33
# img_feat_err += tf.reduce_mean(feat_square_diff, axis=1)
- gen_feat = gen_feat_ex["InceptionV3/Mixed_7c"]
- target_feat = target_feat_ex["InceptionV3/Mixed_7c"]
+ # # gen_feat = gen_feat_ex["InceptionV3/Mixed_7c"]
+ # # target_feat = target_feat_ex["InceptionV3/Mixed_7c"]
+ # # feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
+ # # feat_loss += tf.reduce_mean(feat_square_diff) * 0.17
+ # # img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.17
+
+ # lower layers (VGG-style "conv1_1, conv1_2, conv3_2, conv4_2" analogue, mapped
+ # to InceptionV3 endpoints: Conv2d_1a_3x3, Conv2d_2a_3x3, Conv2d_3b_1x1, Conv2d_4a_3x3)
+ gen_feat = gen_feat_ex["InceptionV3/Conv2d_1a_3x3"]
+ target_feat = target_feat_ex["InceptionV3/Conv2d_1a_3x3"]
+ feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
+ feat_loss += tf.reduce_mean(feat_square_diff) * 0.25
+ img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25
+
+ gen_feat = gen_feat_ex["InceptionV3/Conv2d_2a_3x3"]
+ target_feat = target_feat_ex["InceptionV3/Conv2d_2a_3x3"]
feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
- feat_loss += tf.reduce_mean(feat_square_diff) * 0.17
- img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.17
+ feat_loss += tf.reduce_mean(feat_square_diff) * 0.25
+ img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25
+
+ gen_feat = gen_feat_ex["InceptionV3/Conv2d_3b_1x1"]
+ target_feat = target_feat_ex["InceptionV3/Conv2d_3b_1x1"]
+ feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
+ feat_loss += tf.reduce_mean(feat_square_diff) * 0.25
+ img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25
+
+ gen_feat = gen_feat_ex["InceptionV3/Conv2d_4a_3x3"]
+ target_feat = target_feat_ex["InceptionV3/Conv2d_4a_3x3"]
+ feat_square_diff = tf.reshape(tf.square(gen_feat - target_feat), [BATCH_SIZE, -1])
+ feat_loss += tf.reduce_mean(feat_square_diff) * 0.25
+ img_feat_err += tf.reduce_mean(feat_square_diff, axis=1) * 0.25
else:
feat_loss = tf.constant(0.0)
@@ -481,11 +506,14 @@ for image_batch, label_batch in image_gen:
if params.save_progress:
# Save linear interpolation between the actual and generated encodings.
if params.dist_loss:
+ print("Generating linear interpolation...")
enc_batch, gen_enc = sess.run([encoding, gen_encoding])
for j in range(10):
+ gen_time = time.time()
custom_enc = gen_enc * (1-(j/10.0)) + enc_batch * (j/10.0)
sess.run(encoding.assign(custom_enc))
gen_images = sess.run(gen_img)
+ print("Generation time: {:.1f}s".format(time.time() - gen_time))
inv_batch = vs.interleave(vs.data2img(image_batch[BATCH_SIZE - SAMPLE_SIZE:]),
vs.data2img(gen_images[BATCH_SIZE - SAMPLE_SIZE:]))
inv_batch = vs.grid_transform(inv_batch)