| | | |
|---|---|---|
| author | Jules Laplace <julescarbon@gmail.com> | 2018-06-25 18:00:36 +0200 |
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-06-25 18:00:36 +0200 |
| commit | b1b6a118d1c1f04164e1c6f386cd345597dbf3a7 (patch) | |
| tree | 2d9839b87785e14e07d072d8a9a5788e476785e8 /run.py | |
| parent | bf3ab6c2f4975dbf73f1ad8f7d755d6ebab8eb52 (diff) | |
process sequences
Diffstat (limited to 'run.py')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | run.py | 42 |
1 file changed, 21 insertions, 21 deletions
```diff
@@ -56,7 +56,7 @@ tensorOutput = torch.FloatTensor()
 index = 0
 
 def recurse_two_frames(moduleNetwork, tensorOutput, a_np, b_np, frame_index, morph_index, step, depth=0):
-    step /= 2
+    step /= 2
 
     tensorInputFirst = torch.FloatTensor(a_np)
     tensorInputSecond = torch.FloatTensor(b_np)
@@ -64,35 +64,35 @@ def recurse_two_frames(moduleNetwork, tensorOutput, a_np, b_np, frame_index, mor
     middle_np = tensorOutput.clamp(0.0, 1.0).numpy()
     middle_img = (numpy.rollaxis(middle_np, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
 
-    if morph_index == frame_index:
-        print("frame {}, depth {}".format(frame_index, depth))
+    if morph_index == frame_index:
+        print("frame {}, depth {}".format(frame_index, depth))
         middle_img = (numpy.rollaxis(middle_np, 0, 3)[:,:,::-1] * 255.0).astype(numpy.uint8)
-        return middle_img
-    if morph_index > frame_index:
-        next_index = frame_index - (step/2)
-        next_a_np = a_np
-        next_b_np = middle_img
-    else:
-        next_index = frame_index + (step/2)
-        next_a_np = middle_img
-        next_b_np = b_np
-    return recurse_two_frames(moduleNetwork, tensorOutput, next_a_np, next_b_np, frame_index, next_index, step, depth+1)
+        return middle_img
+    if morph_index > frame_index:
+        next_index = frame_index - (step/2)
+        next_a_np = a_np
+        next_b_np = middle_img
+    else:
+        next_index = frame_index + (step/2)
+        next_a_np = middle_img
+        next_b_np = b_np
+    return recurse_two_frames(moduleNetwork, tensorOutput, next_a_np, next_b_np, frame_index, next_index, step, depth+1)
 
 def recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index):
     global index
     index += 1
     if (index % 10) == 0:
         print("{}...".format(index))
-    step /= 2
+    step /= 2
 
     a_img = load_image(os.path.join(a, "frame_{:05d}.png".format(frame_index + a_offset)))
     b_img = load_image(os.path.join(b, "frame_{:05d}.png".format(frame_index + b_offset)))
-    frame = process_two_frames(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, a_img, b_img, frame_index, count, count / 2)
-    if step < 2:
-        return [frame]
-    else:
-        left = process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index - (step/2))
-        right = process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index + (step/2))
-        return left + [frame] + right
+    frame = process_two_frames(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, a_img, b_img, frame_index, count, count / 2)
+    if step < 2:
+        return [frame]
+    else:
+        left = process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index - (step/2))
+        right = process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, count, step, frame_index + (step/2))
+        return left + [frame] + right
 
 def process_two_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps):
     return recurse_videos(moduleNetwork, tensorOutput, a, b, a_offset, b_offset, steps, steps, steps/2)
```
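For context on the reindented functions: `process_two_videos` seeds a binary subdivision and `recurse_videos` returns `left + [frame] + right`, so each recursion emits its interpolated midpoint frame between the two halves and the finished list comes out in temporal order. The sketch below is not part of the commit; it drops the network and file I/O and uses a hypothetical `make_frame` stand-in purely to show the ordering that recursion produces.

```python
# Sketch only -- not part of this commit. make_frame is a hypothetical
# stand-in for the real interpolation step in run.py (process_two_frames
# plus the network); here it just returns the frame index so the order of
# emission is visible.

def make_frame(frame_index):
    # run.py would blend the corresponding frames of videos A and B here
    # and return an image; we return the index instead.
    return frame_index

def subdivide(frame_index, step):
    """In-order binary subdivision mirroring recurse_videos():
    recurse into the left half, emit the midpoint frame, recurse into
    the right half, and concatenate as left + [frame] + right."""
    step //= 2                 # run.py uses /; integer halving keeps demo indices whole
    frame = make_frame(frame_index)
    if step < 2:
        return [frame]
    left = subdivide(frame_index - step // 2, step)
    right = subdivide(frame_index + step // 2, step)
    return left + [frame] + right

if __name__ == "__main__":
    steps = 16                 # a power-of-two step count keeps the subdivision even
    print(subdivide(steps // 2, steps))
    # -> [1, 2, 3, ..., 15]: midpoints filled in, already in temporal order
```

Because the list is assembled in sorted order, a caller could write the returned frames to disk sequentially without any post-sorting.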
