import os

from app.settings import app_cfg
from app.utils.file_utils import write_json
from app.search.params import ParamsDict


def make_params_latent(tag):
    return {
        "tag": tag,
        "decay_n": 2,
        "features": True,
        "clip": 1.0,
        "stochastic_clipping": False,
        "clipping": False,
        "path": os.path.join(app_cfg.DIR_INVERSES, tag),
        "dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.hdf5"),
        "out_dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.latent.hdf5"),
        "inv_layer": "latent",
        "decay_lr": True,
        "inv_it": 5000,
        "generator_path": "https://tfhub.dev/deepmind/biggan-512/2",
        "attention_map_layer": "Generator_2/attention/Softmax:0",
        "pre_trained_latent": False,
        "lambda_dist": 0.0,
        "likeli_loss": True,
        "init_hi": 0.001,
        "lr": 0.1,
        "norm_loss": False,
        "generator_fixed_inputs": {
            "truncation": 1.0
        },
        "log_z_norm": True,
        "feature_extractor_path": "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1",
        "mse": True,
        "custom_grad_relu": False,
        "random_label": False,
        "lambda_feat": 1.0,
        "init_gen_dist": False,
        "log_activation_layer": "Generator_2/GBlock/Relu:0",
        "batch_size": 4,
        "fixed_z": False,
        "feature_extractor_output": "InceptionV3/Mixed_7a",
        "init_lo": -0.001,
        "lambda_mse": 1.0,
        "lambda_reg": 0.1,
        "dist_loss": False,
        "sample_size": 4,
        "invert_labels": False,
        "save_progress": True,
    }


def params_latent(tag):
    return ParamsDict(make_params_latent(tag))


def save_params_latent(fp_out_dir, tag):
    data = make_params_latent(tag)
    fp_out_fn = os.path.join(fp_out_dir, "params_latent.json")
    write_json(data, fp_out_fn)


def make_params_dense(tag, folder_id):
    return {
        "tag": tag,
        "folder_id": folder_id,
        "decay_n": 2,
        "features": True,
        "clip": 1.0,
        "stochastic_clipping": False,
        "clipping": False,
        "path": os.path.join(app_cfg.DIR_INVERSES, tag),
        "dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.latent.hdf5"),
        "inv_layer": "Generator_2/G_Z/Reshape:0",
        # "inv_layer": "latent",
        "decay_lr": True,
        # "inv_it": 10000,
        "inv_it": 30000,
        "generator_path": "https://tfhub.dev/deepmind/biggan-512/2",
        "attention_map_layer": "Generator_2/attention/Softmax:0",
        "pre_trained_latent": True,
        "likeli_loss": False,
        "init_hi": 0.001,
        "lr": 0.01,
        "lr_quad": 0.001,
        "norm_loss": False,
        "generator_fixed_inputs": {
            "truncation": 1.0
        },
        "log_z_norm": False,
        # "feature_extractor_path": "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1",
        "feature_extractor_path": "vgg_16",
        "mse": True,
        "custom_grad_relu": False,
        "random_label": False,
        "init_gen_dist": False,
        "log_activation_layer": "Generator_2/GBlock/Relu:0",
        "batch_size": 4,
        "fixed_z": True,
        "feature_extractor_output": "InceptionV3/Mixed_7a",
        "init_lo": -0.001,
        "lambda_dist": 10.0,
        "lambda_feat": 1.0,
        "lambda_mse": 1.0,
        "lambda_reg": 0.1,
        "dist_loss": False,
        "sample_size": 4,
        "out_dataset": os.path.join(app_cfg.DIR_INVERSES, tag, "dataset.dense.hdf5"),
        "save_progress": True,
        "max_batches": 0,
        "invert_labels": True,
    }


def params_dense_dict(tag, folder_id=59):
    return ParamsDict(make_params_dense(tag, folder_id))


def save_params_dense(fp_out_dir, tag, folder_id=59):
    data = make_params_dense(tag, folder_id)
    fp_out_fn = os.path.join(fp_out_dir, "params_dense.json")
    write_json(data, fp_out_fn)
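

# --- Usage sketch (illustrative, not part of the module's public API) ---
# A minimal example of how these builders might be invoked, assuming
# app_cfg.DIR_INVERSES points at a writable directory and that the JSON
# configs are written alongside the inversion data for each run. The tag
# "example_tag" and the per-tag output directory below are hypothetical.
if __name__ == "__main__":
    tag = "example_tag"  # hypothetical identifier for one inversion run
    out_dir = os.path.join(app_cfg.DIR_INVERSES, tag)
    os.makedirs(out_dir, exist_ok=True)

    # Stage 1: write the latent-inversion config for this tag.
    save_params_latent(out_dir, tag)

    # Stage 2: write the dense-inversion config, which consumes the
    # dataset.latent.hdf5 produced by the latent stage.
    save_params_dense(out_dir, tag)

    # ParamsDict wrappers for in-process use.
    p_latent = params_latent(tag)
    p_dense = params_dense_dict(tag)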