"""Parameter presets for two-stage BigGAN-512 inversion runs.

`make_params_latent` configures the latent-space inversion stage; `make_params_dense`
configures the follow-up dense-layer inversion that starts from the latent results.
"""
import os

from app.settings import app_cfg
from app.utils.file_utils import write_json
from app.search.params import ParamsDict


def make_params_latent(tag):
    """Build the parameter dict for the latent-space inversion stage."""
    return {
        "tag": tag,
        "decay_n": 2,
        "features": True,
        "clip": 1.0,
        "stochastic_clipping": False,
        "clipping": False,
        "path": os.path.join(app_cfg.DIR_INVERSES, tag),
        "dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.hdf5"),
        "out_dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.latent.hdf5"),
        "inv_layer": "latent",
        "decay_lr": True,
        "inv_it": 5000,
        "generator_path": "https://tfhub.dev/deepmind/biggan-512/2",
        "attention_map_layer": "Generator_2/attention/Softmax:0",
        "pre_trained_latent": False,
        "lambda_dist": 0.0,
        "likeli_loss": True,
        "init_hi": 0.001,
        "lr": 0.1,
        "norm_loss": False,
        "generator_fixed_inputs": {
            "truncation": 1.0
        },
        "log_z_norm": True,
        "feature_extractor_path": "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1",
        "mse": True,
        "custom_grad_relu": False,
        "random_label": False,
        "lambda_feat": 1.0,
        "init_gen_dist": False,
        "log_activation_layer": "Generator_2/GBlock/Relu:0",
        "batch_size": 4,
        "fixed_z": False,
        "feature_extractor_output": "InceptionV3/Mixed_7a",
        "init_lo": -0.001,
        "lambda_mse": 1.0,
        "lambda_reg": 0.1,
        "dist_loss": False,
        "sample_size": 4,
        "invert_labels": False,
        "save_progress": True,
    }


def params_latent(tag):
    """Wrap the latent-stage parameters in a ParamsDict."""
    return ParamsDict(make_params_latent(tag))


def save_params_latent(fp_out_dir, tag):
    """Write the latent-stage parameters to params_latent.json in fp_out_dir."""
    data = make_params_latent(tag)
    fp_out_fn = os.path.join(fp_out_dir, "params_latent.json")
    write_json(data, fp_out_fn)


def make_params_dense(tag, folder_id):
    """Build the parameter dict for the dense-layer inversion stage."""
    return {
        "tag": tag,
        "folder_id": folder_id,
        "decay_n": 2,
        "features": True,
        "clip": 1.0,
        "stochastic_clipping": False,
        "clipping": False,
        "path": os.path.join(app_cfg.DIR_INVERSES, tag),
        "dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.latent.hdf5"),
        "inv_layer": "Generator_2/G_Z/Reshape:0",
        # "inv_layer": "latent",
        "decay_lr": True,
        # "inv_it": 10000,
        "inv_it": 30000,
        "generator_path": "https://tfhub.dev/deepmind/biggan-512/2",
        "attention_map_layer": "Generator_2/attention/Softmax:0",
        "pre_trained_latent": True,
        "likeli_loss": False,
        "init_hi": 0.001,
        "lr": 0.01,
        "lr_quad": 0.001,
        "norm_loss": False,
        "generator_fixed_inputs": {
            "truncation": 1.0
        },
        "log_z_norm": False,
        # "feature_extractor_path": "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1",
        "feature_extractor_path": "vgg_16",
        "mse": True,
        "custom_grad_relu": False,
        "random_label": False,
        "init_gen_dist": False,
        "log_activation_layer": "Generator_2/GBlock/Relu:0",
        "batch_size": 4,
        "fixed_z": True,
        "feature_extractor_output": "InceptionV3/Mixed_7a",
        "init_lo": -0.001,
        "lambda_dist": 10.0,
        "lambda_feat": 1.0,
        "lambda_mse": 1.0,
        "lambda_reg": 0.1,
        "dist_loss": False,
        "sample_size": 4,
        "out_dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.dense.hdf5"),
        "save_progress": True,
        "max_batches": 0,
        "invert_labels": True,
    }


def params_dense_dict(tag, folder_id=59):
    """Wrap the dense-stage parameters in a ParamsDict."""
    return ParamsDict(make_params_dense(tag, folder_id))


def save_params_dense(fp_out_dir, tag, folder_id=59):
    """Write the dense-stage parameters to params_dense.json in fp_out_dir."""
    data = make_params_dense(tag, folder_id)
    fp_out_fn = os.path.join(fp_out_dir, "params_dense.json")
    write_json(data, fp_out_fn)
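

# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# Builds both parameter sets for a placeholder tag and writes the JSON files into
# that tag's inversion directory, mirroring the two-stage flow implied above:
# the dense stage reads the dataset.latent.hdf5 produced by the latent stage.
# The tag name and the os.makedirs call here are hypothetical, for demonstration only.
if __name__ == "__main__":
    example_tag = "example_run"  # hypothetical tag
    fp_tag_dir = os.path.join(app_cfg.DIR_INVERSES, example_tag)
    os.makedirs(fp_tag_dir, exist_ok=True)

    # Stage 1: latent-space inversion parameters.
    save_params_latent(fp_tag_dir, example_tag)

    # Stage 2: dense-layer inversion parameters, seeded from the latent stage output.
    save_params_dense(fp_tag_dir, example_tag)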