import click
from app.utils import click_utils
from app.settings import app_cfg
from os.path import join
import time
import numpy as np
from PIL import Image


def image_to_uint8(x):
    """Converts a [-1, 1] float array to a [0, 255] uint8 array."""
    x = np.asarray(x)
    x = (256. / 2.) * (x + 1.)
    x = np.clip(x, 0, 255)
    x = x.astype(np.uint8)
    return x
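

# A small inverse helper (an addition, not in the original script): the
# module's 'encode' signature expects float images in [-1, 1], so this
# undoes image_to_uint8 when preparing encoder inputs.
def uint8_to_image(x):
    """Converts a [0, 255] uint8 array to a [-1, 1] float32 array."""
    x = np.asarray(x, dtype=np.float32)
    return x / (256. / 2.) - 1.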


@click.command('')
# @click.option('-i', '--input', 'opt_dir_in', required=True,
#   help='Path to input image glob directory')
# @click.option('-r', '--recursive', 'opt_recursive', is_flag=True)
@click.pass_context
def cli(ctx):
    """Generate a batch of random BigBiGAN samples and save them as PNGs."""
print("Loading Tensorflow....")
import tensorflow as tf
import tensorflow_hub as hub
#tf.compat.v1.disable_eager_execution()
#g = tf.compat.v1.get_default_graph()
    print("Loading module...")
    module = hub.Module('https://tfhub.dev/deepmind/bigbigan-resnet50/1')

    # Sample a batch of 8 random latent vectors (z) from the Gaussian prior,
    # then call the generator on them. Raw generator output is 128x128; the
    # 'upsampled' output is 256x256, both RGB in range [-1, 1].
    z = tf.random.normal([8, 120])  # latent samples, shape [8, 120]
    outputs = module(z, signature='generate', as_dict=True)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        sess.run(tf.compat.v1.tables_initializer())
        results = sess.run(outputs)

    # Convert each sample to uint8 and save it; the index keeps filenames
    # unique when several images are written within the same millisecond.
    for idx, sample in enumerate(results['upsampled']):
        sample = image_to_uint8(sample)
        img = Image.fromarray(sample, "RGB")
        fp_img_out = "{}_{}.png".format(int(time.time() * 1000), idx)
        img.save(join(app_cfg.DIR_OUTPUTS, fp_img_out))
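
    # Optional extra, a hedged sketch not in the original script: tile the
    # batch into a single contact sheet for quick inspection. Assumes all
    # 'upsampled' samples are square and the same size.
    samples = [image_to_uint8(s) for s in results['upsampled']]
    side = samples[0].shape[0]
    sheet = Image.new("RGB", (side * len(samples), side))
    for i, s in enumerate(samples):
        sheet.paste(Image.fromarray(s, "RGB"), (i * side, 0))
    fp_sheet = "sheet_{}.png".format(int(time.time() * 1000))
    sheet.save(join(app_cfg.DIR_OUTPUTS, fp_sheet))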
    # The module also exposes an 'encode' signature. Given a batch of 256x256
    # RGB images in range [-1, 1], the encoder computes predicted latents z
    # and other features (e.g. for use in downstream recognition tasks):
    #
    #   images = tf.compat.v1.placeholder(tf.float32, shape=[None, 256, 256, 3])
    #   features = module(images, signature='encode', as_dict=True)
    #   z_sample = features['z_sample']  # shape [?, 120]
    #
    # Other available features include 'avepool_feat' and 'bn_crelu_feat',
    # used in the representation learning results. Reconstructions of the
    # input images can be computed by passing z_sample back through the
    # generator. Raw generator outputs are half the encoder input resolution
    # (128x128); for 256x256 outputs matching the encoder input, use:
    #
    #   recons = module(z_sample, signature='generate', as_dict=True)['upsampled']
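

# A hedged, runnable version of the round trip sketched in the comments
# above (an addition, not part of the original script). The caller supplies
# the input and output paths; the image is resized to the encoder's 256x256
# input, encoded to z_sample, and decoded back through the generator.
def reconstruct(fp_in, fp_out):
    """Encode an image to z_sample and decode it back through the generator."""
    import tensorflow as tf
    import tensorflow_hub as hub
    tf.compat.v1.disable_eager_execution()

    pil_img = Image.open(fp_in).convert("RGB").resize((256, 256))
    batch = uint8_to_image(pil_img)[np.newaxis, ...]  # shape [1, 256, 256, 3]

    module = hub.Module('https://tfhub.dev/deepmind/bigbigan-resnet50/1')
    images = tf.compat.v1.placeholder(tf.float32, shape=[None, 256, 256, 3])
    features = module(images, signature='encode', as_dict=True)
    recons = module(features['z_sample'], signature='generate',
                    as_dict=True)['upsampled']

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        sess.run(tf.compat.v1.tables_initializer())
        out = sess.run(recons, feed_dict={images: batch})

    Image.fromarray(image_to_uint8(out[0]), "RGB").save(fp_out)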