diff options
| author | Jules Laplace <julescarbon@gmail.com> | 2018-06-08 11:41:31 +0200 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-06-08 11:41:31 +0200 |
| commit | 6498985aff0b54ee74bb3367998d303baef062dc (patch) | |
| tree | cb2242ef77c4207bf2f774e0b9a048a2a2f076ba /live-mogrify.py | |
| parent | a2185a66341d382feb5948b000f4ddbbc3764c0e (diff) | |
fix
Diffstat (limited to 'live-mogrify.py')
| -rw-r--r-- | live-mogrify.py | 62 |
1 file changed, 31 insertions, 31 deletions
diff --git a/live-mogrify.py b/live-mogrify.py index d93991b..0348c54 100644 --- a/live-mogrify.py +++ b/live-mogrify.py @@ -158,32 +158,32 @@ def list_sequences(module): import torchvision.transforms as transforms -def get_transform(opt={}): - transform_list = [] - if opt.resize_or_crop == 'resize_and_crop': - osize = [opt.loadSize, opt.loadSize] - transform_list.append(transforms.Scale(osize, Image.BICUBIC)) - if opt.center_crop: - transform_list.append(transforms.CenterCrop(opt.fineSize)) - else: - transform_list.append(transforms.RandomCrop(opt.fineSize)) - # elif opt.resize_or_crop == 'crop': - # transform_list.append(transforms.RandomCrop(opt.fineSize)) - # elif opt.resize_or_crop == 'scale_width': - # transform_list.append(transforms.Lambda( - # lambda img: __scale_width(img, opt.fineSize))) - # elif opt.resize_or_crop == 'scale_width_and_crop': - # transform_list.append(transforms.Lambda( - # lambda img: __scale_width(img, opt.loadSize))) - # transform_list.append(transforms.RandomCrop(opt.fineSize)) +# def get_transform(opt={}): +# transform_list = [] +# if opt.resize_or_crop == 'resize_and_crop': +# osize = [opt.loadSize, opt.loadSize] +# transform_list.append(transforms.Scale(osize, Image.BICUBIC)) +# if opt.center_crop: +# transform_list.append(transforms.CenterCrop(opt.fineSize)) +# else: +# transform_list.append(transforms.RandomCrop(opt.fineSize)) +# # elif opt.resize_or_crop == 'crop': +# # transform_list.append(transforms.RandomCrop(opt.fineSize)) +# # elif opt.resize_or_crop == 'scale_width': +# # transform_list.append(transforms.Lambda( +# # lambda img: __scale_width(img, opt.fineSize))) +# # elif opt.resize_or_crop == 'scale_width_and_crop': +# # transform_list.append(transforms.Lambda( +# # lambda img: __scale_width(img, opt.loadSize))) +# # transform_list.append(transforms.RandomCrop(opt.fineSize)) - # if opt.isTrain and not opt.no_flip: - # transform_list.append(transforms.RandomHorizontalFlip()) +# # if opt.isTrain and not opt.no_flip: +# # 
transform_list.append(transforms.RandomHorizontalFlip()) - transform_list += [transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), - (0.5, 0.5, 0.5))] - return transforms.Compose(transform_list) +# transform_list += [transforms.ToTensor(), +# transforms.Normalize((0.5, 0.5, 0.5), +# (0.5, 0.5, 0.5))] +# return transforms.Compose(transform_list) def load_frame(opt, index): A_path = os.path.join(opt.render_dir, "frame_{:05d}.png".format(index)) @@ -309,17 +309,17 @@ def process_live_input(opt, data_opt, rpc_client, model): print("generating...") sequence_i = 1 i = 0 - #for i, data in enumerate(data_loader): - while True: + # while True: + + # data = load_frame(opt, i) + # if data is None: + # print("got no frame, exiting") + # break + for i, data in enumerate(data_loader): if i >= opt.how_many: print("generated {} images, exiting".format(i)) break - data = load_frame(opt, i) - if data is None: - print("got no frame, exiting") - break - if data_opt.load_checkpoint is True: model.save_dir = os.path.join(opt.checkpoints_dir, opt.module_name, data_opt.checkpoint_name) model.load_network(model.netG, 'G', data_opt.epoch) |
