author    Jules Laplace <julescarbon@gmail.com>    2018-06-25 17:44:04 +0200
committer Jules Laplace <julescarbon@gmail.com>    2018-06-25 17:44:04 +0200
commit    64a322888397bb7170a5f1eb4719e8efcd7d43c9
tree      7d337308d1dfa8882ef704ed9f1b09f957ec594e
parent    9f551f37c4e9912340557d4613661f502241c50e

run thang

-rw-r--r--  run.py | 112
1 file changed, 82 insertions(+), 30 deletions(-)
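This commit adds a two-video morph mode with per-input frame offsets (--a-offset, --b-offset) and factors the repeated image loading and video writing into helpers. A hypothetical invocation of the new mode (all paths and values are placeholders; --first and --second are directories of frame_00001.png-style frames):

    python run.py --first ./frames_a --second ./frames_b \
        --video-out morph.mp4 --steps 64 --a-offset 0 --b-offset 120

Because the getopt call below derives its option list from every other token of sys.argv, arguments must be passed strictly as --flag value pairs.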
diff --git a/run.py b/run.py
index 046473a..5886677 100644
--- a/run.py
+++ b/run.py
@@ -21,30 +21,29 @@ arguments_strOut = './result.png'
arguments_strVideo = False
arguments_strVideoOut = datetime.now().strftime("sepconv_%Y%m%d_%H%M.mp4")
arguments_steps = 0
+arguments_aOffset = 0
+arguments_bOffset = 0
for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [ strParameter[2:] + '=' for strParameter in sys.argv[1::2] ])[0]:
    if strOption == '--model':
        arguments_strModel = strArgument # which model to use, l1 or lf, please see our paper for more details
-
    elif strOption == '--first':
        arguments_strFirst = strArgument # path to the first frame
-
    elif strOption == '--second':
        arguments_strSecond = strArgument # path to the second frame
-
    elif strOption == '--out':
        arguments_strOut = strArgument # path to where the output should be stored
-
    elif strOption == '--video':
        arguments_strVideo = strArgument # path to the input video
-
    elif strOption == '--video-out':
        arguments_strVideoOut = strArgument # path to where the video should be stored
-
    elif strOption == '--steps':
        arguments_steps = int(strArgument)
-    # end
-# end
+    elif strOption == '--a-offset':
+        arguments_aOffset = int(strArgument)
+    elif strOption == '--b-offset':
+        arguments_bOffset = int(strArgument)
+
if not os.path.exists('./renders'):
    os.mkdir('renders')
@@ -53,6 +52,48 @@ tensorOutput = torch.FloatTensor()
index = 0
+def recurse_two_frames(moduleNetwork, tensorOutput, a_np, b_np, frame_index, morph_index, step, depth=0):
+    # binary search: synthesize the midpoint of [a, b], then descend toward frame_index
+    step /= 2
+
+    tensorInputFirst = torch.FloatTensor(a_np)
+    tensorInputSecond = torch.FloatTensor(b_np)
+    process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
+    middle_np = tensorOutput.clamp(0.0, 1.0).numpy()
+
+    if morph_index == frame_index:
+        print("frame {}, depth {}".format(frame_index, depth))
+        return (numpy.rollaxis(middle_np, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
+    if morph_index > frame_index:
+        # target is in the left half: keep a, the midpoint becomes the new b
+        next_index = morph_index - (step/2)
+        next_a_np = a_np
+        next_b_np = middle_np
+    else:
+        # target is in the right half: the midpoint becomes the new a, keep b
+        next_index = morph_index + (step/2)
+        next_a_np = middle_np
+        next_b_np = b_np
+    return recurse_two_frames(moduleNetwork, tensorOutput, next_a_np, next_b_np, frame_index, next_index, step, depth+1)
+
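recurse_two_frames performs a binary search over morph positions: each call synthesizes the midpoint of [a, b] with one network pass, then recurses into whichever half contains the target frame_index, halving step as it goes. A minimal sketch of just the index walk, with the network call stripped out (assumes frame_index is reachable from the power-of-two step):

    # sketch: how morph_index converges on frame_index; each hop is one network pass
    def walk(frame_index, morph_index, step, depth=0):
        step //= 2
        if morph_index == frame_index:
            return depth
        if morph_index > frame_index:
            return walk(frame_index, morph_index - step // 2, step, depth + 1)
        return walk(frame_index, morph_index + step // 2, step, depth + 1)

    print(walk(24, 32, 64))  # visits 32, 16, 24 -> depth 2

Reaching frame k of count therefore costs O(log count) forward passes rather than synthesizing every intermediate frame.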
+def recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index):
+    global index
+    index += 1
+    if (index % 10) == 0:
+        print("{}...".format(index))
+    step /= 2
+    a_img = load_image(os.path.join(a, "frame_{:05d}.png".format(frame_index + a_offset)))
+    b_img = load_image(os.path.join(b, "frame_{:05d}.png".format(frame_index + b_offset)))
+    frame = recurse_two_frames(moduleNetwork, tensorOutput, a_img, b_img, frame_index, count / 2, count)
+    if step < 2:
+        return [frame]
+    else:
+        left = recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index - (step/2))
+        right = recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index + (step/2))
+        return left + [frame] + right
+
+def process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps):
+    return recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps, steps, steps/2)
+
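process_two_videos drives an in-order traversal: recurse_videos returns left subtree + current frame + right subtree, so the list comes back already in timeline order and can be written sequentially. A sketch of the visit order alone (assumes steps is a power of two):

    # sketch: frame indices produced for steps=8, in output order
    def visit(step, frame_index):
        step //= 2
        if step < 2:
            return [frame_index]
        left = visit(step, frame_index - step // 2)
        right = visit(step, frame_index + step // 2)
        return left + [frame_index] + right

    print(visit(8, 4))  # [1, 2, 3, 4, 5, 6, 7]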
def process_tree(moduleNetwork, a, b, tensorOutput, steps):
    global index
    index += 1
@@ -63,13 +104,26 @@ def process_tree(moduleNetwork, a, b, tensorOutput, steps):
    process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
    middle = tensorOutput.clamp(0.0, 1.0).numpy()
    np_middle = (numpy.rollaxis(middle, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
-    if steps < 2:
+    if steps < 4:
        return [np_middle]
    else:
        left = process_tree(moduleNetwork, a, middle, tensorOutput, steps / 2)
        right = process_tree(moduleNetwork, middle, b, tensorOutput, steps / 2)
        return left + [np_middle] + right
+def store_frames(frames, outputPath):
+    print('writing {}'.format(outputPath))
+    print('frames: {}'.format(len(frames)))
+    writer = FFMPEG_VideoWriter(outputPath, (1024, 512), 25)
+    for frame in frames:
+        writer.write_frame(frame)
+    writer.close()
+
+def load_image(path):
+    # (H, W, 3) uint8 RGB file -> (C, H, W) float32 BGR in [0, 1]
+    return numpy.rollaxis(numpy.asarray(PIL.Image.open(path))[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0
+
+def load_image_tensor(path):
+    return torch.FloatTensor(load_image(path))
+
+def image_to_frame(image_np):
+    # inverse of load_image: back to (H, W, 3) uint8 RGB for the video writer
+    return (numpy.rollaxis(image_np, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
+
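These helpers pin down one image convention: load_image yields channels-first float32 BGR in [0, 1], which is what the network consumes, and image_to_frame converts back to the (H, W, 3) uint8 RGB layout that FFMPEG_VideoWriter.write_frame expects. A round-trip sketch on synthetic data (no real frame required):

    # sketch: loader/writer layout round trip on a synthetic 512x1024 image
    import numpy
    img = numpy.random.rand(3, 512, 1024).astype(numpy.float32)    # as load_image returns
    frame = (numpy.rollaxis(img, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
    assert frame.shape == (512, 1024, 3)                           # as write_frame expects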
if arguments_strVideo and arguments_strVideoOut:
    reader = FFMPEG_VideoReader(arguments_strVideo, False)
    writer = FFMPEG_VideoWriter(arguments_strVideoOut, reader.size, reader.fps*2)
@@ -85,25 +139,23 @@ if arguments_strVideo and arguments_strVideoOut:
        writer.write_frame((numpy.rollaxis(tensorOutput.clamp(0.0, 1.0).numpy(), 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8))
        writer.write_frame(nextFrame)
    writer.close()
-else:
+elif os.path.isdir(arguments_strFirst):
+    # Morph two videos: --first and --second are directories of numbered frames.
+    # arguments_strVideoOut always has a datetime default, so testing it here
+    # would shadow the image modes below; test the input type instead.
+    print("{} => {}".format(arguments_strFirst, arguments_strSecond))
+    frames = process_two_videos(moduleNetwork, tensorOutput, arguments_strFirst, arguments_strSecond, arguments_aOffset, arguments_bOffset, arguments_steps)
+    outputPath = './renders/' + arguments_strVideoOut
+    store_frames(frames, outputPath)
+elif arguments_steps == 0:
    # Process image
-    if arguments_steps == 0:
-        tensorInputFirst = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strFirst))[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0)
-        tensorInputSecond = torch.FloatTensor(numpy.rollaxis(numpy.asarray(PIL.Image.open(arguments_strSecond))[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0)
-        process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
-        PIL.Image.fromarray((numpy.rollaxis(tensorOutput.clamp(0.0, 1.0).numpy(), 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)).save(arguments_strOut)
-    else:
-        print("{} => {}".format(arguments_strFirst, arguments_strSecond))
-        inputFirst = PIL.Image.open(arguments_strFirst)
-        inputSecond = PIL.Image.open(arguments_strSecond)
-        tensorInputFirst = numpy.rollaxis(numpy.asarray(inputFirst)[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0
-        tensorInputSecond = numpy.rollaxis(numpy.asarray(inputSecond)[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0
-        tree = process_tree(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput, arguments_steps)
-        outputPath = './renders/' + arguments_strVideoOut
-        print('writing {}'.format(outputPath))
-        print('frames: {}'.format(len(tree)))
-        writer = FFMPEG_VideoWriter(outputPath, (1024, 512), 25)
-        writer.write_frame(inputFirst)
-        for frame in tree:
-            writer.write_frame(frame)
-        writer.write_frame(inputSecond)
+    tensorInputFirst = load_image_tensor(arguments_strFirst)
+    tensorInputSecond = load_image_tensor(arguments_strSecond)
+    process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
+    PIL.Image.fromarray((numpy.rollaxis(tensorOutput.clamp(0.0, 1.0).numpy(), 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)).save(arguments_strOut)
+else:
+    # Morph two images
+    print("{} => {}".format(arguments_strFirst, arguments_strSecond))
+    inputFirst = load_image(arguments_strFirst)
+    inputSecond = load_image(arguments_strSecond)
+    frames = process_tree(moduleNetwork, inputFirst, inputSecond, tensorOutput, arguments_steps)
+    # bracket the interpolated frames with the (converted) originals, as before
+    frames = [image_to_frame(inputFirst)] + frames + [image_to_frame(inputSecond)]
+    outputPath = './renders/' + arguments_strVideoOut
+    store_frames(frames, outputPath)
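For comparison, a hypothetical invocation of the image-morph path (placeholders again): --first points at a file rather than a directory, so the isdir check above falls through to process_tree:

    python run.py --first a.png --second b.png --steps 32 --video-out blend.mp4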