 data/aligned_data_loader.py   |  8 ++++++--
 data/unaligned_data_loader.py | 14 ++++++++------
 options/base_options.py       |  1 -
 options/train_options.py      |  8 +++++---
 4 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/data/aligned_data_loader.py b/data/aligned_data_loader.py
index a1efde8..039c113 100644
--- a/data/aligned_data_loader.py
+++ b/data/aligned_data_loader.py
@@ -43,12 +43,16 @@ class AlignedDataLoader(BaseDataLoader):
     def initialize(self, opt):
         BaseDataLoader.initialize(self, opt)
         self.fineSize = opt.fineSize
-        transform = transforms.Compose([
+
+        transformations = [
             # TODO: Scale
             transforms.Scale(opt.loadSize),
             transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5),
-                                 (0.5, 0.5, 0.5))])
+                                 (0.5, 0.5, 0.5))]
+        if opt.isTrain and not opt.no_flip:
+            transformations.insert(1, transforms.RandomHorizontalFlip())
+        transform = transforms.Compose(transformations)
 
         # Dataset A
         dataset = ImageFolder(root=opt.dataroot + '/' + opt.phase,
diff --git a/data/unaligned_data_loader.py b/data/unaligned_data_loader.py
index 77f9274..3deb55b 100644
--- a/data/unaligned_data_loader.py
+++ b/data/unaligned_data_loader.py
@@ -53,12 +53,14 @@ class PairedData(object):
 class UnalignedDataLoader(BaseDataLoader):
     def initialize(self, opt):
         BaseDataLoader.initialize(self, opt)
-        transform = transforms.Compose([
-            transforms.Scale(opt.loadSize),
-            transforms.RandomCrop(opt.fineSize),
-            transforms.ToTensor(),
-            transforms.Normalize((0.5, 0.5, 0.5),
-                                 (0.5, 0.5, 0.5))])
+        transformations = [transforms.Scale(opt.loadSize),
+                           transforms.RandomCrop(opt.fineSize),
+                           transforms.ToTensor(),
+                           transforms.Normalize((0.5, 0.5, 0.5),
+                                                (0.5, 0.5, 0.5))]
+        if opt.isTrain and not opt.no_flip:
+            transformations.insert(1, transforms.RandomHorizontalFlip())
+        transform = transforms.Compose(transformations)
 
         # Dataset A
         dataset_A = ImageFolder(root=opt.dataroot + '/' + opt.phase + 'A',
diff --git a/options/base_options.py b/options/base_options.py
index 9ec7c9a..619ca60 100644
--- a/options/base_options.py
+++ b/options/base_options.py
@@ -20,7 +20,6 @@ class BaseOptions():
         self.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
         self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
         self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2')
-        self.parser.add_argument('--flip', action='store_true', help='if flip the images for data argumentation')
         self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
         self.parser.add_argument('--align_data', action='store_true', help='if True, the datasets are loaded from "test" and "train" directories and the data pairs are aligned')
diff --git a/options/train_options.py b/options/train_options.py
index 4b4eac3..a1d347f 100644
--- a/options/train_options.py
+++ b/options/train_options.py
@@ -10,14 +10,16 @@ class TrainOptions(BaseOptions):
         self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
         self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
         self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
-        self.parser.add_argument('--niter', type=int, default=200, help='# of iter at starting learning rate')
-        self.parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
+        self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
+        self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
         self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
         self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
         self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
         self.parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
         self.parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
-        self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
+        self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
         self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
+        self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
+        # NOT-IMPLEMENTED self.parser.add_argument('--preprocessing', type=str, default='resize_and_crop', help='resizing/cropping strategy')
         self.isTrain = True
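
Note on the change above: both data loaders now follow the same pattern, i.e. build the transform pipeline as a plain Python list, insert a RandomHorizontalFlip at index 1 only when training and --no_flip is not given, then wrap the list in transforms.Compose. Below is a minimal, self-contained sketch of that pattern; the build_transform helper and the argparse defaults are illustrative stand-ins rather than code from this commit, and transforms.Resize is used where the repository still calls the older transforms.Scale.

import argparse

from torchvision import transforms


def build_transform(opt):
    # Assemble the pipeline as a list first, mirroring the diff above.
    transformations = [
        transforms.Resize(opt.loadSize),   # the repo uses the older name transforms.Scale
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5),
                             (0.5, 0.5, 0.5)),
    ]
    # Flipping is now opt-out: applied during training unless --no_flip is set.
    if opt.isTrain and not opt.no_flip:
        transformations.insert(1, transforms.RandomHorizontalFlip())
    return transforms.Compose(transformations)


if __name__ == '__main__':
    # Illustrative stand-in for the project's option parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--loadSize', type=int, default=286)
    parser.add_argument('--fineSize', type=int, default=256)
    parser.add_argument('--no_flip', action='store_true',
                        help='if specified, do not flip the images for data augmentation')
    opt = parser.parse_args()
    opt.isTrain = True
    print(build_transform(opt))

Inserting at index 1 places the flip immediately after the resize and before the crop and ToTensor steps, so it still operates on the PIL image; and because the flag is now --no_flip rather than the removed --flip, horizontal flipping becomes the training default.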
