Diffstat (limited to 'options')
-rw-r--r--  options/base_options.py  | 1 -
-rw-r--r--  options/train_options.py | 8 +++++---
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/options/base_options.py b/options/base_options.py
index 9ec7c9a..619ca60 100644
--- a/options/base_options.py
+++ b/options/base_options.py
@@ -20,7 +20,6 @@ class BaseOptions():
self.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2')
- self.parser.add_argument('--flip' , action='store_true', help='if flip the images for data argumentation')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--align_data', action='store_true',
help='if True, the datasets are loaded from "test" and "train" directories and the data pairs are aligned')
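Note on the flip change: with --flip removed from BaseOptions here and --no_flip added to TrainOptions in the next file, horizontal flipping becomes the default during training and is disabled only when --no_flip is passed. The snippet below is a minimal illustrative sketch, not code from this commit, of how a data-loading transform might consume the new flag; the helper name maybe_flip and the opt argument are assumptions.

# Illustrative sketch only -- not part of this commit. Shows how a dataset
# transform might honour the new opt.no_flip flag, assuming PIL images.
import random
from PIL import Image

def maybe_flip(img, opt):
    # Flipping is now the default behaviour; --no_flip turns it off explicitly.
    if not opt.no_flip and random.random() < 0.5:
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img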
diff --git a/options/train_options.py b/options/train_options.py
index 4b4eac3..a1d347f 100644
--- a/options/train_options.py
+++ b/options/train_options.py
@@ -10,14 +10,16 @@ class TrainOptions(BaseOptions):
self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
- self.parser.add_argument('--niter', type=int, default=200, help='# of iter at starting learning rate')
- self.parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
+ self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
+ self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
self.parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
self.parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
- self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
+ self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
+ self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
+
# NOT-IMPLEMENTED self.parser.add_argument('--preprocessing', type=str, default='resize_and_crop', help='resizing/cropping strategy')
self.isTrain = True
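The new defaults split training into 100 epochs at the initial learning rate (--niter) followed by 100 epochs of linear decay to zero (--niter_decay), and raise --pool_size from 0 to 50, enabling a history buffer of previously generated images that the discriminator is shown alongside the newest fakes. Below is a minimal illustrative sketch of such a buffer, assuming the pool_size semantics above (0 disables the buffer); the class and method names are placeholders, not this repository's actual implementation.

# Illustrative sketch only -- a fixed-size replay buffer of generated images.
import random

class GeneratedImagePool:
    def __init__(self, pool_size):
        self.pool_size = pool_size
        self.images = []

    def query(self, image):
        if self.pool_size == 0:
            return image                      # pool_size=0: buffer disabled
        if len(self.images) < self.pool_size:
            self.images.append(image)         # still filling the buffer
            return image
        if random.random() < 0.5:
            idx = random.randrange(self.pool_size)
            old, self.images[idx] = self.images[idx], image
            return old                        # return an older generated image
        return image                          # otherwise return the new one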