| field | value | when |
|---|---|---|
| author | Jules Laplace <julescarbon@gmail.com> | 2018-06-25 19:01:55 +0200 |
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-06-25 19:01:55 +0200 |
| commit | 686e275524b102555d30b6454068a9d8af1b126c (patch) | |
| tree | f4dd8fab4512281f701093cf9ac5c93d088193d3 | |
| parent | abbd1a21a6d31223540bfe54108620377bfb5a51 (diff) | |
process sequences
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | process-sequences.sh | 9 |
| -rw-r--r-- | run.py | 65 |

2 files changed, 50 insertions, 24 deletions
```diff
diff --git a/process-sequences.sh b/process-sequences.sh
index 326da41..7a81662 100644
--- a/process-sequences.sh
+++ b/process-sequences.sh
@@ -7,6 +7,7 @@ function process() {
     --first "../pix2pixhd/sequences/$sequence_a/" \
     --second "../pix2pixhd/sequences/$sequence_b/" \
     --steps 32 \
+    --dilate 4 \
     --video-out "morph_video_${sequence_a}.mp4"
   scp renders/* jules@lmno:asdf/neural/morph/
 }
@@ -21,11 +22,15 @@ function process_self() {
     --second "../pix2pixhd/sequences/$sequence_b/" \
     --a-offset "$a_offset" \
     --b-offset "$b_offset" \
-    --steps 128 \
+    --steps 64 \
+    --dilate 4 \
     --video-out "morph_self_${sequence_a}.mp4"
   scp renders/* jules@lmno:asdf/neural/morph/
 }
 
 process wood dji_phantom_3_drone_flying_the_blue_ridge_mountains_
 process a_walk_around_canary_wharf_spring_afternoon_in_london london_walk_around_the_roof_garden_of_crossrail_place_in_canary_wharf
-process_self wood 0 500
+process_self wood 0 25
+process_self wood 0 250
+process_self wood 0 2500
+
diff --git a/run.py b/run.py
--- a/run.py
+++ b/run.py
@@ -21,6 +21,7 @@ arguments_strOut = './result.png'
 arguments_strVideo = False
 arguments_strVideoOut = datetime.now().strftime("sepconv_%Y%m%d_%H%M.mp4")
 arguments_steps = 0
+arguments_dilate = 1
 arguments_aOffset = 0
 arguments_bOffset = 0
 arguments_mixVideos = False
@@ -41,6 +42,8 @@ for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [ strParameter[2:
 		arguments_strVideoOut = strArgument # path to where the video should be stored
 	elif strOption == '--steps':
 		arguments_steps = int(strArgument)
+	elif strOption == '--dilate':
+		arguments_dilate = int(strArgument)
 	elif strOption == '--mix-videos':
 		arguments_mixVideos = True
 	elif strOption == '--a-offset':
@@ -66,21 +69,20 @@ def recurse_two_frames(moduleNetwork, tensorOutput, a_np, b_np, frame_index, mor
 	if morph_index == frame_index:
 		print("frame {}, depth {}".format(frame_index, depth))
-		middle_img = (numpy.rollaxis(middle_np, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
-		return middle_img
+		return middle_np
 	if morph_index > frame_index:
 		next_index = morph_index - step
 		next_a_np = a_np
 		next_b_np = middle_np
-		print("next index: {} - {}".format(next_index, step))
+		# print("next index: {} - {}".format(next_index, step))
 	else:
 		next_index = morph_index + step
 		next_a_np = middle_np
 		next_b_np = b_np
-		print("next index: {} + {}".format(next_index, step))
+		# print("next index: {} + {}".format(next_index, step))
 	return recurse_two_frames(moduleNetwork, tensorOutput, next_a_np, next_b_np, frame_index, next_index, step/2, depth+1)
 
-def recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index):
+def recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index, dilate):
 	global index
 	index += 1
 	if (index % 10) == 0:
```
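In the hunk above, `recurse_two_frames` now returns the raw CHW float array (`middle_np`) and leaves the uint8 conversion to the writer; its seeking strategy is unchanged: synthesize the midpoint of the current pair, then replace one endpoint and halve the step until the requested index is reached. A minimal scalar sketch of that bisection; `frame_at` and `blend` are illustrative names, and the averaging stands in for the sepconv network's midpoint synthesis:

```python
def blend(a, b):
    # stand-in for the sepconv network call: the real code synthesizes
    # the temporal midpoint of two frames, here we simply average
    return (a + b) / 2.0

def frame_at(a, b, frame_index, count):
    # walk toward frame_index on a grid of `count` positions by repeated
    # midpointing, like recurse_two_frames; assumes count is a power of
    # two and 0 < frame_index < count
    morph_index, step = count // 2, count // 4
    while True:
        middle = blend(a, b)
        if morph_index == frame_index:
            return middle
        if morph_index > frame_index:  # target lies in the left half
            morph_index, b = morph_index - step, middle
        else:                          # target lies in the right half
            morph_index, a = morph_index + step, middle
        step //= 2

# with scalar "frames" the fixed point is simply frame_index / count,
# which makes the traversal easy to verify
print(frame_at(0.0, 1.0, 3, 8))  # 0.375
```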
```diff
@@ -91,18 +93,19 @@ def recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count,
 	print("{} => {}".format(a_fn, b_fn))
 	a_np = load_image(a_fn)
 	b_np = load_image(b_fn)
-	frame = recurse_two_frames(moduleNetwork, tensorOutput, a_np, b_np, frame_index, count / 2, count / 4)
-	if step < 2:
-		return [frame]
+	img_np = recurse_two_frames(moduleNetwork, tensorOutput, a_np, b_np, frame_index, count / 2, count / 4)
+	if step < 2 * dilate:
+		return [img_np]
 	else:
 		left = recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index - (step/2))
 		right = recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index + (step/2))
-		return left + [frame] + right
+		return left + [img_np] + right
 
-def process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps):
-	return recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps, steps, steps/2)
+def process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps, dilate):
+	steps *= dilate
+	return recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps, steps, steps/2, dilate)
 
-def process_tree(moduleNetwork, a, b, tensorOutput, steps):
+def process_tree(moduleNetwork, a, b, tensorOutput, steps, dilate):
 	global index
 	index += 1
 	if (index % 10) == 0:
@@ -110,14 +113,27 @@
 	tensorInputFirst = torch.FloatTensor(a)
 	tensorInputSecond = torch.FloatTensor(b)
 	process(moduleNetwork, tensorInputFirst, tensorInputSecond, tensorOutput)
-	middle = tensorOutput.clamp(0.0, 1.0).numpy()
-	np_middle = (numpy.rollaxis(middle, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
-	if steps < 4:
-		return [np_middle]
+	middle_np = tensorOutput.clamp(0.0, 1.0).numpy()
+	if steps < 4 * dilate:
+		return [middle_np]
 	else:
-		left = process_tree(moduleNetwork, a, middle, tensorOutput, steps / 2)
-		right = process_tree(moduleNetwork, middle, b, tensorOutput, steps / 2)
-		return left + [np_middle] + right
+		left = process_tree(moduleNetwork, a, middle_np, tensorOutput, steps / 2, dilate)
+		right = process_tree(moduleNetwork, middle_np, b, tensorOutput, steps / 2, dilate)
+		return left + [middle_np] + right
+
+def dilate_frames(moduleNetwork, tensorOutput, frames, dilate):
+	if dilate < 2:
+		return frames
+	print("dilating by a factor of {}".format(dilate))
+	new_frames = []
+	nextFrame = frames[0]
+	for i in range(1, len(frames)):
+		firstFrame = nextFrame
+		nextFrame = frames[i]
+		new_frames += [firstFrame]
+		new_frames += process_tree(moduleNetwork, firstFrame, nextFrame, tensorOutput, dilate, 1)
+	new_frames += [nextFrame]
+	return new_frames
 
 def store_frames(frames, outputPath, inputFirst=None, inputSecond=None):
 	print('writing {}'.format(outputPath))
```
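The new `dilate_frames` pass above reuses `process_tree` with `dilate` as the step count, which (for a power-of-two `dilate`, given the halving) yields exactly `dilate - 1` in-betweens per gap. A small stand-alone model of the pass, assuming scalar frames and a linear `interpolate` in place of the network-driven `process_tree` (both substitutions are illustrative):

```python
def interpolate(a, b, n):
    # stand-in for process_tree(moduleNetwork, a, b, tensorOutput, n, 1):
    # return n - 1 evenly spaced in-between frames
    return [a + (b - a) * i / n for i in range(1, n)]

def dilate_frames(frames, dilate):
    # insert dilate - 1 synthesized frames into each gap while keeping
    # the original frames in order, mirroring the pass in run.py
    if dilate < 2:
        return frames
    out = []
    for first, second in zip(frames, frames[1:]):
        out.append(first)
        out.extend(interpolate(first, second, dilate))
    out.append(frames[-1])
    return out

print(dilate_frames([0.0, 1.0, 2.0], 4))
# [0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0]
```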
```diff
@@ -126,10 +142,13 @@ def store_frames(frames, outputPath, inputFirst=None, inputSecond=None):
 	if inputFirst is not None:
 		writer.write_frame(inputFirst)
 	for frame in frames:
-		writer.write_frame(frame)
+		writer.write_frame(tensor_to_image(frame))
 	if inputSecond is not None:
 		writer.write_frame(inputSecond)
 
+def tensor_to_image(np_val):
+	return (numpy.rollaxis(np_val, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
+
 def load_image(path):
 	return numpy.rollaxis(numpy.asarray(PIL.Image.open(path))[:,:,::-1], 2, 0).astype(numpy.float32) / 255.0
@@ -155,8 +174,9 @@ elif arguments_mixVideos:
 	# Morph two videos
 	print("morph two videos...")
 	print("{} => {}".format(arguments_strFirst, arguments_strSecond))
-	frames = process_two_videos(moduleNetwork, tensorOutput, arguments_strFirst, arguments_strSecond, arguments_aOffset, arguments_bOffset, arguments_steps)
 	outputPath = './renders/' + arguments_strVideoOut
+	frames = process_two_videos(moduleNetwork, tensorOutput, arguments_strFirst, arguments_strSecond, arguments_aOffset, arguments_bOffset, arguments_steps, arguments_dilate)
+	frames = dilate_frames(moduleNetwork, tensorOutput, frames, arguments_dilate)
 	store_frames(frames, outputPath)
 elif arguments_steps == 0:
 	# Process image
@@ -169,6 +189,7 @@ else:
 	print("{} => {}".format(arguments_strFirst, arguments_strSecond))
 	inputFirst = load_image(arguments_strFirst)
 	inputSecond = load_image(arguments_strSecond)
-	frames = process_tree(moduleNetwork, inputFirst, inputSecond, tensorOutput, arguments_steps)
 	outputPath = './renders/' + arguments_strVideoOut
+	frames = process_tree(moduleNetwork, inputFirst, inputSecond, tensorOutput, arguments_steps * arguments_dilate, arguments_dilate)
+	frames = dilate_frames(moduleNetwork, tensorOutput, frames, arguments_dilate)
 	store_frames(frames, outputPath, inputFirst, inputSecond)
```
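`tensor_to_image` now owns the CHW-float-to-HWC-uint8 conversion that the old code inlined in two places, so frames stay in network layout until `store_frames` writes them. A quick round-trip check of the conversion math; `image_to_tensor` is a hypothetical name for the array half of `load_image` (minus the `PIL.Image.open` read):

```python
import numpy

def tensor_to_image(np_val):
    # CHW float in [0, 1] -> HWC uint8, flipping channel order,
    # exactly the expression added to run.py
    return (numpy.rollaxis(np_val, 0, 3)[:, :, ::-1] * 255.0).astype(numpy.uint8)

def image_to_tensor(img):
    # HWC uint8 -> CHW float32 in [0, 1], the inverse direction
    return numpy.rollaxis(img[:, :, ::-1], 2, 0).astype(numpy.float32) / 255.0

img = (numpy.random.rand(4, 6, 3) * 255).astype(numpy.uint8)
back = tensor_to_image(image_to_tensor(img))
# float32 rounding can lose at most one grey level on the way back
assert abs(back.astype(int) - img.astype(int)).max() <= 1
```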
