diff options
Diffstat (limited to 'options')
-rw-r--r--  options/base_options.py  | 4 +++-
-rw-r--r--  options/train_options.py | 5 ++---
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/options/base_options.py b/options/base_options.py index 4074746..bce0b9c 100644 --- a/options/base_options.py +++ b/options/base_options.py @@ -1,7 +1,7 @@ import argparse import os from util import util -from pdb import set_trace as st + class BaseOptions(): def __init__(self): self.parser = argparse.ArgumentParser() @@ -35,6 +35,8 @@ class BaseOptions(): self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') self.parser.add_argument('--identity', type=float, default=0.0, help='use identity mapping. Setting identity other than 1 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set optidentity = 0.1') self.parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator') + self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + self.initialized = True def parse(self): diff --git a/options/train_options.py b/options/train_options.py index b241863..4b4eac3 100644 --- a/options/train_options.py +++ b/options/train_options.py @@ -10,10 +10,9 @@ class TrainOptions(BaseOptions): self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model') - self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') - self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') + self.parser.add_argument('--niter', type=int, default=200, help='# of iter at starting learning rate') + self.parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero') self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') - self.parser.add_argument('--ntrain', type=int, default=float("inf"), help='# of examples per epoch.') self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') self.parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)') |
