| author | SsnL <tongzhou.wang.1994@gmail.com> | 2017-07-06 22:19:53 -0500 |
|---|---|---|
| committer | SsnL <tongzhou.wang.1994@gmail.com> | 2017-07-06 22:31:24 -0500 |
| commit | 25124b8389f80d7a509b2d98ef69589cab597c9a (patch) | |
| tree | 185d876bb0fed0e681f163e79ad810e597c8dd8c /data/single_dataset.py | |
| parent | ee0a8292e2b87449c325bdb9439f90f911a0c0a1 (diff) | |
resize_or_crop and better display single image
Diffstat (limited to 'data/single_dataset.py')
| -rw-r--r-- | data/single_dataset.py | 18 |
1 files changed, 2 insertions, 16 deletions
```diff
diff --git a/data/single_dataset.py b/data/single_dataset.py
index 106bea3..faf416a 100644
--- a/data/single_dataset.py
+++ b/data/single_dataset.py
@@ -1,6 +1,6 @@
 import os.path
 import torchvision.transforms as transforms
-from data.base_dataset import BaseDataset
+from data.base_dataset import BaseDataset, get_transform
 from data.image_folder import make_dataset
 from PIL import Image
 
@@ -15,21 +15,7 @@ class SingleDataset(BaseDataset):
 
         self.A_paths = sorted(self.A_paths)
 
-        transform_list = []
-        if opt.resize_or_crop == 'resize_and_crop':
-            transform_list.append(transforms.Scale(opt.loadSize))
-
-        if opt.isTrain and not opt.no_flip:
-            transform_list.append(transforms.RandomHorizontalFlip())
-
-        if opt.resize_or_crop != 'no_resize':
-            transform_list.append(transforms.RandomCrop(opt.fineSize))
-
-        transform_list += [transforms.ToTensor(),
-                           transforms.Normalize((0.5, 0.5, 0.5),
-                                                (0.5, 0.5, 0.5))]
-
-        self.transform = transforms.Compose(transform_list)
+        self.transform = get_transform(opt)
 
     def __getitem__(self, index):
         A_path = self.A_paths[index]
```
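The commit replaces the per-dataset transform construction with a call to a shared `get_transform(opt)` helper imported from `data/base_dataset.py`. That helper is not shown in this diff; the sketch below is an assumption that reconstructs it from the transform logic deleted above, so its exact contents may differ from the real implementation in the repository.

```python
# Hypothetical sketch of get_transform in data/base_dataset.py,
# reconstructed from the transform_list code removed in this commit.
import torchvision.transforms as transforms


def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        # Resize the image to loadSize before random-cropping to fineSize.
        transform_list.append(transforms.Scale(opt.loadSize))

    if opt.isTrain and not opt.no_flip:
        # Random horizontal flips are used only as training-time augmentation.
        transform_list.append(transforms.RandomHorizontalFlip())

    if opt.resize_or_crop != 'no_resize':
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    # Convert to a tensor and map each RGB channel from [0, 1] to [-1, 1].
    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
```

Centralizing the pipeline this way means every dataset class builds its preprocessing from the same `opt`-driven helper instead of duplicating the `resize_or_crop` branching, which is why `SingleDataset.initialize` shrinks to a single line here.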
