diff options

-rw-r--r--  process-sequences.sh  |  6
-rw-r--r--  run.py                | 26

2 files changed, 29 insertions, 3 deletions
diff --git a/process-sequences.sh b/process-sequences.sh
index a189dc7..10ba72f 100644
--- a/process-sequences.sh
+++ b/process-sequences.sh
@@ -8,7 +8,8 @@ function process() {
         --second "../pix2pixhd/sequences/$sequence_b/" \
         --steps 8 \
         --dilate 8 \
-        --video-out "morph_dilate_video_${sequence_a}.mp4"
+        --smooth True \
+        --video-out "morph_smooth_${sequence_a}.mp4"
     scp renders/* jules@lmno:asdf/neural/morph/
 }
 function process_self() {
@@ -24,7 +25,8 @@ function process_self() {
         --b-offset "$b_offset" \
         --steps 8 \
         --dilate 16 \
-        --video-out "morph_dilate_self_${b_offset}_${sequence}.mp4"
+        --smooth True \
+        --video-out "morph_smooth_self_${b_offset}_${sequence}.mp4"
     scp renders/* jules@lmno:asdf/neural/morph/
 }
diff --git a/run.py b/run.py
--- a/run.py
+++ b/run.py
@@ -22,6 +22,7 @@ arguments_strVideo = False
 arguments_strVideoOut = datetime.now().strftime("sepconv_%Y%m%d_%H%M.mp4")
 arguments_steps = 0
 arguments_dilate = 1
+arguments_smooth = False
 arguments_aOffset = 0
 arguments_bOffset = 0
 arguments_mixVideos = False
@@ -44,8 +45,10 @@ for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [ strParameter[2:]
 		arguments_steps = int(strArgument)
 	elif strOption == '--dilate':
 		arguments_dilate = int(strArgument)
+	elif strOption == '--smooth':
+		arguments_dilate = bool(strArgument)
 	elif strOption == '--mix-videos':
-		arguments_mixVideos = True
+		arguments_mixVideos = bool(strArgument)
 	elif strOption == '--a-offset':
 		arguments_aOffset = int(strArgument)
 	elif strOption == '--b-offset':
@@ -121,6 +124,25 @@ def process_tree(moduleNetwork, a, b, tensorOutput, steps, dilate):
 	right = process_tree(moduleNetwork, middle_np, b, tensorOutput, steps / 2, dilate)
 	return left + [middle_np] + right
 
+def smooth_frames(moduleNetwork, tensorOutput, frames, smooth):
+	if not smooth:
+		return frames
+	print("smoothing every other frame")
+	firstFrame = frames[0]
+	new_frames = [firstFrame]
+	for i in range(1, len(frames), 2):
+		firstFrame = frames[i]
+		nextFrame = frames[i+2]
+
+		tensorInputFirst = torch.FloatTensor(firstFrame)
+		tensorInputSecond = torch.FloatTensor(nextFrame)
+		process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
+		middle_np = tensorOutput.clamp(0.0, 1.0).numpy()
+
+		new_frames += [firstFrame, middle_np]
+		new_frames += [nextFrame]
+	return new_frames
+
 def dilate_frames(moduleNetwork, tensorOutput, frames, dilate):
 	if dilate < 2:
 		return frames
@@ -176,6 +198,7 @@ elif arguments_mixVideos:
 	print("{} => {}".format(arguments_strFirst, arguments_strSecond))
 	outputPath = './renders/' + arguments_strVideoOut
 	frames = process_two_videos(moduleNetwork, tensorOutput, arguments_strFirst, arguments_strSecond, arguments_aOffset, arguments_bOffset, arguments_steps, arguments_dilate)
+	frames = smooth_frames(moduleNetwork, tensorOutput, frames, arguments_smooth)
 	frames = dilate_frames(moduleNetwork, tensorOutput, frames, arguments_dilate)
 	store_frames(frames, outputPath)
 elif arguments_steps == 0:
@@ -191,5 +214,6 @@ else:
 	inputSecond = load_image(arguments_strSecond)
 	outputPath = './renders/' + arguments_strVideoOut
 	frames = process_tree(moduleNetwork, inputFirst, inputSecond, tensorOutput, arguments_steps * arguments_dilate, arguments_dilate)
+	frames = smooth_frames(moduleNetwork, tensorOutput, frames, arguments_smooth)
 	frames = dilate_frames(moduleNetwork, tensorOutput, frames, arguments_dilate)
 	store_frames(frames, outputPath, inputFirst, inputSecond)
