#!/usr/bin/env python2.7
"""Frame-interpolation driver for the SepConv network.

Modes (chosen by CLI flags):
  * default          : interpolate one middle frame between --first and --second,
                       write it to --out.
  * --steps N        : recursively interpolate ~N in-between frames and write
                       them as a video under ./renders/.
  * --video PATH     : double the frame rate of an input video by inserting one
                       interpolated frame between every consecutive pair.
"""

import os
import sys
import getopt
import numpy
import torch
import PIL
import PIL.Image
from datetime import datetime
from network import Network, process
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter

##########################################################

arguments_strModel = 'lf'                     # which model to use, l1 or lf, please see the paper for details
arguments_strFirst = './images/first.png'     # path to the first frame
arguments_strSecond = './images/second.png'   # path to the second frame
arguments_strOut = './result.png'             # path to where the output should be stored
arguments_strVideo = False                    # path to an input video (False = image mode)
arguments_strVideoOut = datetime.now().strftime("sepconv_%Y%m%d_%H%M.mp4")  # output video filename
arguments_steps = 0                           # approximate number of in-between frames to synthesize

# getopt long-option list is derived from the actual argv flags; every flag takes a value.
for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [ strParameter[2:] + '=' for strParameter in sys.argv[1::2] ])[0]:
    if strOption == '--model':
        arguments_strModel = strArgument
    elif strOption == '--first':
        arguments_strFirst = strArgument
    elif strOption == '--second':
        arguments_strSecond = strArgument
    elif strOption == '--out':
        arguments_strOut = strArgument
    elif strOption == '--video':
        # FIX: previously assigned arguments_strVideoOut here, so --video could
        # never enable video mode (arguments_strVideo stayed False).
        arguments_strVideo = strArgument
    elif strOption == '--video-out':
        arguments_strVideoOut = strArgument
    elif strOption == '--steps':
        arguments_steps = int(strArgument)
    # end
# end

if not os.path.exists('./renders'):
    os.mkdir('./renders')

moduleNetwork = Network(arguments_strModel).cuda()
tensorOutput = torch.FloatTensor()  # reused output buffer, filled in-place by process()

index = 0  # global progress counter across all process_tree() calls


def process_tree(moduleNetwork, a, b, tensorOutput, steps):
    """Recursively bisect the interval between frames `a` and `b`.

    a, b          -- numpy float arrays in CHW layout, values in [0, 1]
                     (BGR channel order; converted back to RGB on output).
    tensorOutput  -- shared torch.FloatTensor the network writes into.
    steps         -- approximate number of frames still to generate in this
                     subtree; recursion stops when it drops below 2.

    Returns a time-ordered list of HWC uint8 RGB frames.
    """
    global index
    index += 1
    if (index % 10) == 0:
        print("{}...".format(index))

    tensorInputFirst = torch.FloatTensor(a)
    tensorInputSecond = torch.FloatTensor(b)
    process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)

    middle = tensorOutput.clamp(0.0, 1.0).numpy()
    # CHW float [0,1] -> HWC uint8, flipping BGR back to RGB.
    np_middle = (numpy.rollaxis(middle, 0, 3)[:, :, ::-1] * 255.0).astype(numpy.uint8)

    if steps < 2:
        return [np_middle]

    # FIX: use floor division so `steps` stays an int and the recursion
    # terminates identically under Python 3 (plain / is float division there).
    left = process_tree(moduleNetwork, a, middle, tensorOutput, steps // 2)
    right = process_tree(moduleNetwork, middle, b, tensorOutput, steps // 2)
    return left + [np_middle] + right
# end


if arguments_strVideo and arguments_strVideoOut:
    # Video mode: insert one interpolated frame between every consecutive pair,
    # doubling the frame rate.
    reader = FFMPEG_VideoReader(arguments_strVideo, False)
    writer = FFMPEG_VideoWriter(arguments_strVideoOut, reader.size, reader.fps * 2)
    reader.initialize()
    nextFrame = reader.read_frame()
    for x in range(0, reader.nframes):
        firstFrame = nextFrame
        nextFrame = reader.read_frame()
        # Frames arrive as HWC uint8 RGB; convert to CHW float [0,1] BGR for the network.
        tensorInputFirst = torch.FloatTensor(numpy.rollaxis(firstFrame[:, :, ::-1], 2, 0) / 255.0)
        tensorInputSecond = torch.FloatTensor(numpy.rollaxis(nextFrame[:, :, ::-1], 2, 0) / 255.0)
        process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
        writer.write_frame(firstFrame)
        writer.write_frame((numpy.rollaxis(tensorOutput.clamp(0.0, 1.0).numpy(), 0, 3)[:, :, ::-1] * 255.0).astype(numpy.uint8))
        writer.write_frame(nextFrame)
    writer.close()
else:
    # Image mode.
    if arguments_steps == 0:
        # Single middle frame between the two stills.
        tensorInputFirst = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strFirst))[:, :, ::-1], 2, 0).astype(numpy.float32) / 255.0)
        tensorInputSecond = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strSecond))[:, :, ::-1], 2, 0).astype(numpy.float32) / 255.0)
        process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
        PIL.Image.fromarray((numpy.rollaxis(tensorOutput.clamp(0.0, 1.0).numpy(), 0, 3)[:, :, ::-1] * 255.0).astype(numpy.uint8)).save(arguments_strOut)
    else:
        # Recursive interpolation tree rendered as a short video.
        inputFirst = PIL.Image.open(arguments_strFirst)
        inputSecond = PIL.Image.open(arguments_strSecond)
        tensorInputFirst = numpy.rollaxis(numpy.asarray(inputFirst)[:, :, ::-1], 2, 0).astype(numpy.float32) / 255.0
        tensorInputSecond = numpy.rollaxis(numpy.asarray(inputSecond)[:, :, ::-1], 2, 0).astype(numpy.float32) / 255.0
        tree = process_tree(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput, arguments_steps)

        outputPath = './renders/' + arguments_strVideoOut
        print('writing {}'.format(outputPath))
        print('frames: {}'.format(len(tree)))
        # FIX: the frame size was hard-coded to (1024, 512); a mismatch between
        # the declared size and the actual frame arrays corrupts the ffmpeg
        # output. Derive it from the first input image instead (PIL .size is (w, h)).
        writer = FFMPEG_VideoWriter(outputPath, inputFirst.size, 25)
        # FIX: pass numpy arrays (as all interpolated frames are), not PIL Images.
        writer.write_frame(numpy.asarray(inputFirst))
        for frame in tree:
            writer.write_frame(frame)
        writer.write_frame(numpy.asarray(inputSecond))
        # FIX: the writer was never closed here, leaving the ffmpeg pipe open
        # and potentially truncating the output file.
        writer.close()