author     junyanz <junyanz@berkeley.edu>    2017-10-06 10:46:43 -0700
committer  junyanz <junyanz@berkeley.edu>    2017-10-06 10:46:43 -0700
commit     7800d516596f1a25986b458cddf8b8785bcc7df8 (patch)
tree       56d57350e7104393f939ec7cc2e07c96840aaa27 /options
parent     e986144cee13a921fd3ad68d564f820e8f7dd3b0 (diff)
support nc=1, add new learning rate policy and new initialization
Diffstat (limited to 'options')
-rw-r--r--    options/base_options.py    1
-rw-r--r--    options/train_options.py   3
2 files changed, 4 insertions, 0 deletions
diff --git a/options/base_options.py b/options/base_options.py
index de8bc74..c1b0733 100644
--- a/options/base_options.py
+++ b/options/base_options.py
@@ -39,6 +39,7 @@ class BaseOptions():
self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.parser.add_argument('--identity', type=float, default=0.0, help='use identity mapping. Setting identity other than 1 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set opt.identity = 0.1')
+ self.parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
self.initialized = True
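
Note: below is a minimal sketch of how the new --init_type flag could be consumed when building the networks, assuming a PyTorch model. The helper name init_weights and the specific std/gain values are illustrative assumptions, not the repository's actual implementation.

from torch.nn import init

def init_weights(net, init_type='xavier'):
    """Apply one of the --init_type schemes to Conv/Linear/BatchNorm layers (illustrative sketch)."""
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=0.02)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=1)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            # batch-norm layers keep a normal(1, 0.02) weight and zero bias
            init.normal_(m.weight.data, 1.0, 0.02)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)  # recursively applies init_func to every submodule
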
diff --git a/options/train_options.py b/options/train_options.py
index a595017..f8a0ff6 100644
--- a/options/train_options.py
+++ b/options/train_options.py
@@ -21,4 +21,7 @@ class TrainOptions(BaseOptions):
self.parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
+ self.parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')
+ self.parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
+
self.isTrain = True
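
Note: a rough sketch of how --lr_policy and --lr_decay_iters could map onto torch.optim.lr_scheduler. The helper name get_scheduler and the training-length attributes opt.niter / opt.niter_decay are assumptions made for illustration only.

from torch.optim import lr_scheduler

def get_scheduler(optimizer, opt):
    """Return a learning-rate scheduler matching --lr_policy (illustrative sketch)."""
    if opt.lr_policy == 'lambda':
        # linear decay toward zero over opt.niter_decay epochs after opt.niter epochs
        def lambda_rule(epoch):
            return 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        # multiply the lr by gamma every --lr_decay_iters epochs
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        # reduce the lr when a monitored metric stops improving
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler

A training loop would then call scheduler.step() once per epoch, passing the monitored loss for the 'plateau' policy.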