#!/usr/bin/env python2.7
"""Frame-interpolation driver for the sepconv network.

Runs the separable-convolution interpolation network either on a pair of
still images (optionally recursively, producing a short video) or on every
consecutive frame pair of an input video (doubling its frame rate).
"""

import getopt
import os
import sys
from datetime import datetime

import numpy
import torch
import PIL
import PIL.Image

from network import Network, process

# NOTE(review): FFMPEG_VideoReader / FFMPEG_VideoWriter are used below but are
# never imported in this file -- presumably they come from
# moviepy.video.io.ffmpeg_reader / moviepy.video.io.ffmpeg_writer.
# Confirm and add the import; as written, the video paths raise NameError.

##########################################################

arguments_strModel = 'lf'                    # which model to use, l1 or lf, see the sepconv paper
arguments_strFirst = './images/first.png'    # path to the first frame
arguments_strSecond = './images/second.png'  # path to the second frame
arguments_strVideo = ''                      # input video path; empty selects still-image mode
arguments_strOut = './result.png'            # where the interpolated image is stored
arguments_strVideoOut = datetime.now().strftime("sepconv_%Y%m%d_%H%M.mp4")  # where video output is stored
arguments_steps = 0                          # recursion depth for image mode; 0 = single middle frame

# Parse long options explicitly. The original built the option list out of
# sys.argv itself, which accepted arbitrary flags and crashed on odd argv
# shapes; it also never handled --video even though arguments_strVideo is
# tested below.
for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [
    'model=', 'first=', 'second=', 'video=', 'out=', 'video-out=', 'steps=',
])[0]:
    if strOption == '--model':
        arguments_strModel = strArgument
    elif strOption == '--first':
        arguments_strFirst = strArgument
    elif strOption == '--second':
        arguments_strSecond = strArgument
    elif strOption == '--video':
        arguments_strVideo = strArgument
    elif strOption == '--out':
        arguments_strOut = strArgument
    elif strOption == '--video-out':
        arguments_strVideoOut = strArgument
    elif strOption == '--steps':
        arguments_steps = int(strArgument)
    # end
# end

if not os.path.exists('./renders'):
    os.mkdir('./renders')

moduleNetwork = Network(arguments_strModel).cuda()
tensorOutput = torch.FloatTensor()


def tensor_to_frame(tensorImage):
    """Convert a CHW RGB float tensor in [0, 1] into an HWC BGR uint8 frame."""
    return (numpy.rollaxis(tensorImage.clamp(0.0, 1.0).numpy(), 0, 3)[:, :, ::-1] * 255.0).astype(numpy.uint8)


def process_tree(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput, steps):
    """Recursively interpolate between two frames.

    Returns the interpolated frames, in display order, as uint8 BGR arrays.
    `steps` halves on every level, so a value of 2**d yields 2**d - 1 frames.
    """
    process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)

    # Keep a tensor copy of the middle frame: tensorOutput is a shared scratch
    # buffer that the recursive calls overwrite. The original recursed with the
    # uint8 numpy conversion instead, which process() cannot consume.
    tensorMiddle = tensorOutput.clone()

    if steps < 2:
        return [tensor_to_frame(tensorMiddle)]

    # '//' keeps integer halving under both Python 2 and Python 3.
    listLeft = process_tree(moduleNetwork, tensorInputFirst, tensorMiddle, tensorOutput, steps // 2)
    listRight = process_tree(moduleNetwork, tensorMiddle, tensorInputSecond, tensorOutput, steps // 2)

    return listLeft + [tensor_to_frame(tensorMiddle)] + listRight


if arguments_strVideo and arguments_strVideoOut:
    # Video mode: insert one interpolated frame between each consecutive pair,
    # doubling the frame rate.
    reader = FFMPEG_VideoReader(arguments_strVideo, False)
    writer = FFMPEG_VideoWriter(arguments_strVideoOut, reader.size, reader.fps * 2)
    reader.initialize()
    nextFrame = reader.read_frame()

    for intFrame in range(0, reader.nframes):
        firstFrame = nextFrame
        nextFrame = reader.read_frame()

        tensorInputFirst = torch.FloatTensor(numpy.rollaxis(firstFrame[:, :, ::-1], 2, 0) / 255.0)
        tensorInputSecond = torch.FloatTensor(numpy.rollaxis(nextFrame[:, :, ::-1], 2, 0) / 255.0)

        process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)

        # Two output frames per source frame (matching fps * 2). The original
        # also wrote nextFrame here, duplicating every interior frame, since it
        # is written again as the next iteration's firstFrame.
        writer.write_frame(firstFrame)
        writer.write_frame(tensor_to_frame(tensorOutput))
    # end

    writer.write_frame(nextFrame)  # flush the final source frame
    writer.close()
else:
    # Image mode: interpolate between two still images.
    tensorInputFirst = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strFirst))[:, :, ::-1], 2, 0).astype(numpy.float32) / 255.0)
    tensorInputSecond = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strSecond))[:, :, ::-1], 2, 0).astype(numpy.float32) / 255.0)

    if arguments_steps == 0:
        # Single middle frame, saved as an image.
        process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
        PIL.Image.fromarray(tensor_to_frame(tensorOutput)).save(arguments_strOut)
    else:
        # Recursive interpolation, rendered as a short video.
        tree = process_tree(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput, arguments_steps)

        # NOTE(review): frame size and fps are hard-coded -- should presumably
        # be derived from the input images; confirm intended render settings.
        writer = FFMPEG_VideoWriter('./renders/' + arguments_strVideoOut, (1024, 512), 25)

        # write_frame expects uint8 numpy frames; the original passed the raw
        # FloatTensors here and also never closed the writer.
        writer.write_frame(tensor_to_frame(tensorInputFirst))
        for frame in tree:
            writer.write_frame(frame)
        writer.write_frame(tensor_to_frame(tensorInputSecond))
        writer.close()
# end