1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
|
#!/usr/bin/env python2.7
import os
import sys
import getopt
import numpy
import torch
import PIL
import PIL.Image
from datetime import datetime
from network import Network, process
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter
##########################################################
# Default CLI settings; each may be overridden by the getopt loop below.
# Which pretrained model to use: 'l1' or 'lf'.
arguments_strModel = 'lf'
# Paths to the two input frames for still-image interpolation.
arguments_strFirst = './images/first.png'
arguments_strSecond = './images/second.png'
# Where the single interpolated frame is written (steps == 0 case).
arguments_strOut = './result.png'
# Input video path; False (unset) means "image mode".
arguments_strVideo = False
# Output video filename, timestamped by default.
arguments_strVideoOut = datetime.now().strftime("sepconv_%Y%m%d_%H%M.mp4")
# Number of recursive interpolation steps; 0 produces a single middle frame.
arguments_steps = 0
# Parse command-line options. The accepted long-option list is synthesized
# from argv itself (every other token, assuming '--name value' pairs), in the
# style of the original sepconv scripts.
for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [ strParameter[2:] + '=' for strParameter in sys.argv[1::2] ])[0]:
    if strOption == '--model':
        arguments_strModel = strArgument # which model to use, l1 or lf, please see our paper for more details
    elif strOption == '--first':
        arguments_strFirst = strArgument # path to the first frame
    elif strOption == '--second':
        arguments_strSecond = strArgument # path to the second frame
    elif strOption == '--out':
        arguments_strOut = strArgument # path to where the output should be stored
    elif strOption == '--video':
        # BUG FIX: this previously assigned arguments_strVideoOut, so the
        # input video path was never stored and the video branch below
        # could never run (arguments_strVideo stayed False).
        arguments_strVideo = strArgument # path to the input video
    elif strOption == '--video-out':
        arguments_strVideoOut = strArgument # path to where the video should be stored
    elif strOption == '--steps':
        arguments_steps = int(strArgument) # how many interpolated frames to render
    # end
# end
# Ensure the output directory for rendered clips exists. Use a single path
# constant for both the check and the creation: the original checked
# './renders' but created 'renders', which only coincidentally refer to the
# same directory.
strRendersPath = './renders'
if not os.path.exists(strRendersPath):
    os.mkdir(strRendersPath)
# Instantiate the interpolation network on the GPU (requires CUDA) with the
# selected pretrained weights ('l1' or 'lf').
moduleNetwork = Network(arguments_strModel).cuda()
# Reusable tensor passed to process() as the output buffer; its contents are
# read back after each call (presumably filled in place by process()).
tensorOutput = torch.FloatTensor()
def process_tree(moduleNetwork, a, b, tensorOutput, steps):
    """Recursively interpolate between two frames.

    The network synthesizes the midpoint of (a, b); the two halves are then
    subdivided recursively, halving ``steps`` each level, until fewer than
    2 steps remain. The result is the in-order list of synthesized frames
    (endpoints a and b are NOT included).

    Parameters:
        moduleNetwork -- the sepconv network used by process()
        a, b -- CHW float frames in [0, 1] (channel order as produced by the
                callers' [:, :, ::-1] flip -- assumed BGR; TODO confirm)
        tensorOutput -- torch.FloatTensor reused as process()'s output buffer
        steps -- int subdivision budget; controls recursion depth

    Returns:
        list of HWC uint8 numpy frames in temporal order.
    """
    tensorInputFirst = torch.FloatTensor(a)
    tensorInputSecond = torch.FloatTensor(b)
    process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
    middle = tensorOutput.clamp(0.0, 1.0).numpy()
    # CHW -> HWC, undo the channel flip, scale to 8-bit for the video writer.
    np_middle = (numpy.rollaxis(middle, 0, 3)[:, :, ::-1] * 255.0).astype(numpy.uint8)
    if steps < 2:
        return [np_middle]
    # Floor division keeps `steps` an int under Python 3 as well; the
    # original `steps / 2` silently becomes a float there.
    left = process_tree(moduleNetwork, a, middle, tensorOutput, steps // 2)
    right = process_tree(moduleNetwork, middle, b, tensorOutput, steps // 2)
    return left + [np_middle] + right
if arguments_strVideo and arguments_strVideoOut:
    # Video mode: write each source frame followed by one synthesized
    # in-between frame, doubling both frame count and frame rate so the
    # clip's duration is preserved.
    reader = FFMPEG_VideoReader(arguments_strVideo, False)
    writer = FFMPEG_VideoWriter(arguments_strVideoOut, reader.size, reader.fps * 2)
    reader.initialize()
    nextFrame = reader.read_frame()
    # nframes source frames form nframes - 1 consecutive pairs. The original
    # looped nframes times (reading one frame past the end of the stream) and
    # wrote BOTH frames of every pair, duplicating every interior frame.
    for intPair in range(reader.nframes - 1):
        firstFrame = nextFrame
        nextFrame = reader.read_frame()
        # HWC uint8 -> CHW float in [0, 1]; the [:, :, ::-1] flip matches the
        # channel order used in the image branch (assumed BGR -- TODO confirm).
        tensorInputFirst = torch.FloatTensor(numpy.rollaxis(firstFrame[:, :, ::-1], 2, 0) / 255.0)
        tensorInputSecond = torch.FloatTensor(numpy.rollaxis(nextFrame[:, :, ::-1], 2, 0) / 255.0)
        process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
        writer.write_frame(firstFrame)
        writer.write_frame((numpy.rollaxis(tensorOutput.clamp(0.0, 1.0).numpy(), 0, 3)[:, :, ::-1] * 255.0).astype(numpy.uint8))
    # The final source frame is written exactly once, after the loop.
    writer.write_frame(nextFrame)
    writer.close()
else:
# Process image
if arguments_steps == 0:
tensorInputFirst = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strFirst))[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0)
tensorInputSecond = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strSecond))[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0)
process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
PIL.Image.fromarray((numpy.rollaxis(tensorOutput.clamp(0.0, 1.0).numpy(), 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)).save(arguments_strOut)
else:
inputFirst = PIL.Image.open(arguments_strFirst)
inputSecond = PIL.Image.open(arguments_strSecond)
tensorInputFirst = numpy.rollaxis(numpy.asarray(inputFirst)[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0
tensorInputSecond = numpy.rollaxis(numpy.asarray(inputSecond)[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0
tree = process_tree(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput, arguments_steps)
outputPath = './renders/' + arguments_strVideoOut
print('writing {}'.format(outputPath))
print('frames: {}'.format(len(tree)))
writer = FFMPEG_VideoWriter(outputPath, (1024, 512), 25)
writer.write_frame(inputFirst)
for frame in tree:
writer.write_frame(frame)
writer.write_frame(inputSecond)
|