From 20662ee305cd21838078eab97568bbbbb9acffd0 Mon Sep 17 00:00:00 2001 From: jules Date: Tue, 1 May 2018 20:20:27 +0200 Subject: new scripts using canny --- .canny-single.py.swp | Bin 0 -> 12288 bytes canny-cv.py | 129 +++++++++++++++++++++++++++++++++++++++++++++++++++ canny-dir.py | 31 +++++++++++++ canny-single.py | 36 ++++++++++++++ canny-single.sh | 14 ++++++ canny.py | 111 ++++++++++++++++++++++++++++++++++++++++++++ recursive-canny.sh | 14 ++++++ recursive.sh | 6 +-- run-canny.sh | 91 ++++++++++++++++++++++++++++++++++++ run-flow.sh | 37 +++++++++++++++ run.sh | 33 ++++++------- test.py | 3 +- test.sh | 38 +++++++++++++++ util/util.py | 20 ++++++++ 14 files changed, 543 insertions(+), 20 deletions(-) create mode 100644 .canny-single.py.swp create mode 100644 canny-cv.py create mode 100644 canny-dir.py create mode 100644 canny-single.py create mode 100755 canny-single.sh create mode 100644 canny.py create mode 100755 recursive-canny.sh create mode 100755 run-canny.sh create mode 100755 run-flow.sh create mode 100755 test.sh diff --git a/.canny-single.py.swp b/.canny-single.py.swp new file mode 100644 index 0000000..9c12044 Binary files /dev/null and b/.canny-single.py.swp differ diff --git a/canny-cv.py b/canny-cv.py new file mode 100644 index 0000000..df4526a --- /dev/null +++ b/canny-cv.py @@ -0,0 +1,129 @@ +import os +from options.test_options import TestOptions +from data import CreateRecursiveDataLoader +from models import create_model +from util.visualizer import Visualizer +from util.util import mkdirs, crop_image +from util import html +from shutil import move, copyfile +from PIL import Image, ImageOps +from skimage.transform import resize +from scipy.misc import imresize +from shutil import copyfile, rmtree +import numpy as np +import cv2 +import time + +import subprocess +from time import sleep + +blur = 3 +sigma = 0 +canny_lo = 10 +canny_hi = 220 +frac_a = 0.99 +frac_b = 1 - frac_a + +if __name__ == '__main__': + opt = TestOptions().parse() + opt.nThreads = 1 # test code only supports nThreads = 1 + opt.batchSize = 1 # test code only supports batchSize = 1 + opt.serial_batches = True # no shuffle + opt.no_flip = True # no flip + opt.experiment = opt.start_img.split("/")[-1].split(".")[0] + + render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/" + + if os.path.exists(render_dir): + rmtree(render_dir) + mkdirs(render_dir) + + cmd = ("convert", opt.start_img, '-canny', '0x1+10%+30%', render_dir + "frame_00000.png") + process = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, error = process.communicate() + + #copyfile(opt.start_img, render_dir + "frame_00000.png") + + data_loader = CreateRecursiveDataLoader(opt) + dataset = data_loader.load_data() + ds = dataset.dataset + model = create_model(opt) + visualizer = Visualizer(opt) + # create website + web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch)) + webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch)) + # test + last_im = None + for i, data in enumerate(data_loader): + if i >= opt.how_many: + break + model.set_input(data) + model.test() + visuals = model.get_current_visuals() + img_path = model.get_image_paths() + print('%04d: process image... 
%s' % (i, img_path)) + ims = visualizer.save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio) + if dataset.name() == 'RecursiveDatasetDataLoader': + # print(visuals.keys()) + im = visuals['fake_B'] + tmp_path = render_dir + "frame_{:05d}_tmp.png".format(i+1) + edges_path = render_dir + "frame_{:05d}.png".format(i+1) + render_path = render_dir + "ren_{:05d}.png".format(i+1) + # s = 256 + # p = 8 + # im = imresize(im, (s-p, s-p), interp='bicubic') + # image_pil = Image.fromarray(im) + # image_pil = ImageOps.expand(image_pil, p) + # image_pil.save(save_path) + # copyfile(save_path, final_path) + if last_im is not None: + tmp_im = im.copy() + #array_a = np.multiply(im.astype('float64'), frac_a) + #array_b = np.multiply(last_im.astype('float64'), frac_b) + #im = np.add(array_a, array_b).astype('uint8') + # print(im.shape, im.dtype) + last_im = np.roll(tmp_im, 1, axis=1) + else: + last_im = im.copy().astype('uint8') + tmp_im = im.copy().astype('uint8') + #print(im.shape, im.dtype) + + image_pil = Image.fromarray(tmp_im, mode='RGB') + image_pil.save(tmp_path) + os.rename(tmp_path, render_path) + + image_pil = Image.fromarray(im, mode='RGB') + image_pil = crop_image(image_pil, (0.50, 0.50), 0.5) + im = np.asarray(image_pil).astype('uint8') + #print(im.shape, im.dtype) + opencv_image = im[:, :, ::-1].copy() + opencv_image = cv2.GaussianBlur(opencv_image, (blur,blur), sigma) + opencv_image = cv2.Canny(opencv_image, canny_lo, canny_hi) + cv2.imwrite(tmp_path, opencv_image) + os.rename(tmp_path, edges_path) + + webpage.save() + + os.remove(render_dir + "frame_00000.png") + + t = time.time() + t /= 60 + t %= 525600 + video_fn = "{}_{}_canmix_{}frame_{}mix_{}blur_{}sigma_{}lo_{}hi_{}.mp4".format( + opt.name, opt.experiment, + opt.how_many, frac_a, + blur, sigma, canny_lo, canny_hi, + int(t)) + + cmd = ("/usr/bin/ffmpeg", "-i", render_dir + "ren_%05d.png", "-y", "-c:v", "libx264", "-vf", "fps=30", "-pix_fmt", "yuv420p", render_dir + video_fn) + process = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, error = process.communicate() + + print("________") + + cmd = ("scp", render_dir + video_fn, "jules@asdf.us:asdf/neural/") + process = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, error = process.communicate() + + print("https://asdf.us/neural/" + video_fn) + diff --git a/canny-dir.py b/canny-dir.py new file mode 100644 index 0000000..8a671dd --- /dev/null +++ b/canny-dir.py @@ -0,0 +1,31 @@ +import os +from options.test_options import TestOptions +from shutil import move, copyfile +from PIL import Image, ImageOps +from shutil import copyfile, rmtree +import numpy as np +import cv2 + +import subprocess +from time import sleep + +if __name__ == '__main__': + opt = TestOptions().parse() + opt.nThreads = 1 # test code only supports nThreads = 1 + opt.batchSize = 1 # test code only supports batchSize = 1 + opt.serial_batches = True # no shuffle + opt.no_flip = True # no flip + opt.experiment = opt.start_img.split("/")[-1].split(".")[0] + + render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/" + + if os.path.exists(render_dir): + rmtree(render_dir) + mkdirs(render_dir) + pil_image = Image.open(opt.start_img).convert('RGB') + opencv_image = np.array(pil_image) + opencv_image = opencv_image[:, :, ::-1].copy() + opencv_image = cv2.GaussianBlur(opencv_image, (3,3), 1) + opencv_image = cv2.Canny(opencv_image, 100, 200) + cv2.imwrite(render_dir + "frame_00000.png", opencv_image) + diff --git a/canny-single.py b/canny-single.py new file mode 100644 index 
0000000..749067a --- /dev/null +++ b/canny-single.py @@ -0,0 +1,36 @@ +import os +from options.test_options import TestOptions +from shutil import move, copyfile +from PIL import Image, ImageOps +from shutil import copyfile, rmtree +import numpy as np +import cv2 + +import subprocess +from time import sleep + +if __name__ == '__main__': + opt = TestOptions().parse() + opt.nThreads = 1 # test code only supports nThreads = 1 + opt.batchSize = 1 # test code only supports batchSize = 1 + opt.serial_batches = True # no shuffle + opt.no_flip = True # no flip + opt.experiment = opt.start_img + + render_dir = opt.results_dir + opt.experiment + "/" + + if os.path.exists(render_dir): + rmtree(render_dir) + mkdirs(render_dir) + i = 0 + for f in sorted(os.listdir(opt.start_img)): + if not os.path.isfile(f): + continue + pil_image = Image.open(f).convert('RGB') + opencv_image = np.array(pil_image) + opencv_image = opencv_image[:, :, ::-1].copy() + opencv_image = cv2.GaussianBlur(opencv_image, (3,3), 1) + opencv_image = cv2.Canny(opencv_image, 100, 200) + cv2.imwrite(render_dir + "frame_{:04d}.png".format(i), opencv_image) + i += 1 + diff --git a/canny-single.sh b/canny-single.sh new file mode 100755 index 0000000..7ad010f --- /dev/null +++ b/canny-single.sh @@ -0,0 +1,14 @@ +python canny-single.py \ + --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ + --name wood \ + --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \ + --loadSize 256 \ + --fineSize 256 \ + --how_many 30 \ + --model test \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --dataset_mode recursive \ + --norm batch + +# --aspect_ratio 1.777777 \ diff --git a/canny.py b/canny.py new file mode 100644 index 0000000..b8dc6e8 --- /dev/null +++ b/canny.py @@ -0,0 +1,111 @@ +import os +from options.test_options import TestOptions +from data import CreateRecursiveDataLoader +from models import create_model +from util.visualizer import Visualizer +from util.util import mkdirs +from util import html +from shutil import move, copyfile +from PIL import Image, ImageOps +from skimage.transform import resize +from scipy.misc import imresize +from shutil import copyfile, rmtree +import time as time +import numpy as np +import cv2 + +import subprocess +from time import sleep + +if __name__ == '__main__': + opt = TestOptions().parse() + opt.nThreads = 1 # test code only supports nThreads = 1 + opt.batchSize = 1 # test code only supports batchSize = 1 + opt.serial_batches = True # no shuffle + opt.no_flip = True # no flip + opt.experiment = opt.start_img.split("/")[-1].split(".")[0] + + render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/" + + if os.path.exists(render_dir): + rmtree(render_dir) + mkdirs(render_dir) + + cmd = ("convert", opt.start_img, '-canny', '0x1+10%+30%', render_dir + "frame_00000.png") + process = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, error = process.communicate() + + data_loader = CreateRecursiveDataLoader(opt) + dataset = data_loader.load_data() + ds = dataset.dataset + model = create_model(opt) + visualizer = Visualizer(opt) + # create website + web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch)) + webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch)) + # test + last_im = None + for i, data in enumerate(data_loader): + if i >= opt.how_many: + break + model.set_input(data) + model.test() + visuals = model.get_current_visuals() + img_path = model.get_image_paths() + 
print('%04d: process image... %s' % (i, img_path)) + ims = visualizer.save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio) + if dataset.name() == 'RecursiveDatasetDataLoader': + # print(visuals.keys()) + im = visuals['fake_B'] + tmp_path = render_dir + "frame_{:05d}_tmp.png".format(i+1) + edges_path = render_dir + "frame_{:05d}.png".format(i+1) + render_path = render_dir + "ren_{:05d}.png".format(i+1) + # s = 256 + # p = 8 + # im = imresize(im, (s-p, s-p), interp='bicubic') + # image_pil = Image.fromarray(im) + # image_pil = ImageOps.expand(image_pil, p) + # image_pil.save(save_path) + # copyfile(save_path, final_path) + #if last_im is not None: + # frac_a = 999/1000 + # frac_b = 1/1000 + # tmp_im = im.copy() + # array_a = np.multiply(im, frac_a) + # array_b = np.multiply(last_im, frac_b) + # # im = np.add(array_a, array_b).astype('int8') + # # print(im.shape, im.dtype) + # last_im = np.roll(tmp_im, 1, axis=1) + #else: + # last_im = im.copy() + # print(im.shape, im.dtype) + image_pil = Image.fromarray(im, mode='RGB') + image_pil.save(tmp_path) + os.rename(tmp_path, render_path) + + cmd = ("convert", render_path, '-canny', '0x1+10%+30%', tmp_path) + process = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, error = process.communicate() + + os.rename(tmp_path, edges_path) + + webpage.save() + + os.remove(render_dir + "frame_00000.png") + + t = time.time() + t /= 60 + t %= 525600 + video_fn = opt.name + "_" + opt.experiment + "_canny_" + str(opt.how_many) + "_" + str(int(t)) + ".mp4" + + cmd = ("/usr/bin/ffmpeg", "-i", render_dir + "ren_%05d.png", "-y", "-c:v", "libx264", "-vf", "fps=30", "-pix_fmt", "yuv420p", render_dir + video_fn) + process = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, error = process.communicate() + + print("________") + + cmd = ("scp", render_dir + video_fn, "jules@asdf.us:asdf/neural/") + process = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, error = process.communicate() + + print("https://asdf.us/neural/" + video_fn) diff --git a/recursive-canny.sh b/recursive-canny.sh new file mode 100755 index 0000000..0150438 --- /dev/null +++ b/recursive-canny.sh @@ -0,0 +1,14 @@ +python canny-cv.py \ + --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ + --name woodcanny \ + --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \ + --loadSize 256 \ + --fineSize 256 \ + --how_many 200 \ + --model test \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --dataset_mode recursive \ + --norm batch + +# --aspect_ratio 1.777777 \ diff --git a/recursive.sh b/recursive.sh index daffba1..6ccc60d 100755 --- a/recursive.sh +++ b/recursive.sh @@ -1,7 +1,7 @@ python test.py \ - --dataroot /home/lens/Desktop/thumbs/woodscaled/A/train/ \ - --name woodscaled_pix2pix \ - --start_img /home/lens/Desktop/thumbs/woodscaled/A/train/frame_1002.png \ + --dataroot /home/ubuntu/Desktop/thumbs/wood/A/train/ \ + --name wood \ + --start_img /home/ubuntu/Desktop/thumbs/wood/A/train/frame_1002.png \ --how_many 2000 \ --model test \ --which_model_netG unet_256 \ diff --git a/run-canny.sh b/run-canny.sh new file mode 100755 index 0000000..aef1118 --- /dev/null +++ b/run-canny.sh @@ -0,0 +1,91 @@ +dataset="woodcanny" + +#/home/lens/Desktop/dataset-random.pl + +#python datasets/combine_A_and_B.py \ +# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \ +# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \ +# --fold_AB "./datasets/$dataset/" + +python train.py \ + --dataroot "./datasets/$dataset" \ + --name "$dataset" \ + --model pix2pix \ + 
--loadSize 276 \ + --fineSize 256 \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --lambda_B 100 \ + --dataset_mode aligned \ + --which_epoch latest \ + --continue_train \ + --no_lsgan --norm batch --pool_size 0 + +python canny-cv.py \ + --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ + --name woodcanny \ + --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \ + --loadSize 256 \ + --fineSize 256 \ + --how_many 200 \ + --model test \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --dataset_mode recursive \ + --norm batch + +python train.py \ + --dataroot "./datasets/$dataset" \ + --name "$dataset" \ + --model pix2pix \ + --loadSize 276 \ + --fineSize 256 \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --lambda_B 100 \ + --dataset_mode aligned \ + --which_epoch latest \ + --continue_train \ + --no_lsgan --norm batch --pool_size 0 + +python canny-cv.py \ + --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ + --name woodcanny \ + --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \ + --loadSize 256 \ + --fineSize 256 \ + --how_many 200 \ + --model test \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --dataset_mode recursive \ + --norm batch + +python train.py \ + --dataroot "./datasets/$dataset" \ + --name "$dataset" \ + --model pix2pix \ + --loadSize 276 \ + --fineSize 256 \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --lambda_B 100 \ + --dataset_mode aligned \ + --which_epoch latest \ + --continue_train \ + --no_lsgan --norm batch --pool_size 0 + +python canny-cv.py \ + --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ + --name woodcanny \ + --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \ + --loadSize 256 \ + --fineSize 256 \ + --how_many 200 \ + --model test \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --dataset_mode recursive \ + --norm batch + +# --aspect_ratio 1.777777 \ diff --git a/run-flow.sh b/run-flow.sh new file mode 100755 index 0000000..7b7ac4f --- /dev/null +++ b/run-flow.sh @@ -0,0 +1,37 @@ +dataset="woodflow" + +#/home/lens/Desktop/dataset-random.pl + +#python datasets/combine_A_and_B.py \ +# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \ +# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \ +# --fold_AB "./datasets/$dataset/" + +python train.py \ + --dataroot "./datasets/$dataset" \ + --name "$dataset" \ + --model pix2pix \ + --loadSize 276 \ + --fineSize 256 \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --lambda_B 100 \ + --dataset_mode aligned \ + --no_lsgan --norm batch --pool_size 0 + +# --continue_train \ +# --which_epoch latest \ + +#python canny-cv.py \ +# --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ +# --name woodcanny \ +# --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \ +# --loadSize 256 \ +# --fineSize 256 \ +# --how_many 200 \ +# --model test \ +# --which_model_netG unet_256 \ +# --which_direction AtoB \ +# --dataset_mode recursive \ +# --norm batch + diff --git a/run.sh b/run.sh index 43d14e2..c02972d 100755 --- a/run.sh +++ b/run.sh @@ -1,24 +1,25 @@ -dataset="randomcrops5k" +dataset="woodcanny" #/home/lens/Desktop/dataset-random.pl -# + #python datasets/combine_A_and_B.py \ -# --fold_A "/home/lens/Desktop/thumbs/$dataset/A" \ -# --fold_B "/home/lens/Desktop/thumbs/$dataset/B" \ +# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \ +# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \ # --fold_AB "./datasets/$dataset/" -# python train.py \ -# --dataroot 
"./datasets/$dataset" \ -# --name "$dataset" \ -# --model pix2pix \ -# --loadSize 256 \ -# --fineSize 256 \ -# --which_model_netG unet_256 \ -# --which_direction AtoB \ -# --lambda_B 100 \ -# --dataset_mode aligned \ -# --no_lsgan --norm batch --pool_size 0 \ -# --continue_train +python train.py \ + --dataroot "./datasets/$dataset" \ + --name "$dataset" \ + --model pix2pix \ + --loadSize 276 \ + --fineSize 256 \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --lambda_B 100 \ + --dataset_mode aligned \ + --which_epoch latest \ + --continue_train \ + --no_lsgan --norm batch --pool_size 0 python test.py \ --dataroot "/home/lens/Desktop/thumbs/$dataset/A/train/" \ diff --git a/test.py b/test.py index a53932d..1f82875 100644 --- a/test.py +++ b/test.py @@ -25,7 +25,8 @@ if __name__ == '__main__': render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/" - rmtree(render_dir) + if os.path.exists(render_dir): + rmtree(render_dir) mkdirs(render_dir) copyfile(opt.start_img, render_dir + "frame_00000.png") diff --git a/test.sh b/test.sh new file mode 100755 index 0000000..c25c987 --- /dev/null +++ b/test.sh @@ -0,0 +1,38 @@ +dataset="wood" + +#/home/lens/Desktop/dataset-random.pl +# +#python datasets/combine_A_and_B.py \ +# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \ +# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \ +# --fold_AB "./datasets/$dataset/" + +# python train.py \ +# --dataroot "./datasets/$dataset" \ +# --name "$dataset" \ +# --model pix2pix \ +# --loadSize 256 \ +# --fineSize 256 \ +# --which_model_netG unet_256 \ +# --which_direction AtoB \ +# --lambda_B 100 \ +# --dataset_mode aligned \ +# --which_epoch latest \ +# --no_lsgan --norm batch --pool_size 0 +# --continue_train \ + +python test.py \ + --dataroot "/home/lens/Desktop/thumbs/$dataset/A/train/" \ + --name "$dataset" \ + --start_img "/home/lens/Desktop/thumbs/$dataset/A/train/frame_1008.png" \ + --how_many 1000 \ + --model test \ + --aspect_ratio 1.777777 \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --dataset_mode test \ + --loadSize 256 \ + --fineSize 256 \ + --norm batch + + diff --git a/util/util.py b/util/util.py index 7a452a6..1c03715 100644 --- a/util/util.py +++ b/util/util.py @@ -54,3 +54,23 @@ def mkdirs(paths): def mkdir(path): if not os.path.exists(path): os.makedirs(path) + +def crop_image(img, xy, scale_factor): + '''Crop the image around the tuple xy + + Inputs: + ------- + img: Image opened with PIL.Image + xy: tuple with relative (x,y) position of the center of the cropped image + x and y shall be between 0 and 1 + scale_factor: the ratio between the original image's size and the cropped image's size + ''' + center = (img.size[0] * xy[0], img.size[1] * xy[1]) + new_size = (img.size[0] / scale_factor, img.size[1] / scale_factor) + left = max (0, (int) (center[0] - new_size[0] / 2)) + right = min (img.size[0], (int) (center[0] + new_size[0] / 2)) + upper = max (0, (int) (center[1] - new_size[1] / 2)) + lower = min (img.size[1], (int) (center[1] + new_size[1] / 2)) + cropped_img = img.crop((left, upper, right, lower)) + return cropped_img + -- cgit v1.2.3-70-g09d2 From 956a336c0d1f6ce34cda072b542fd483f241a673 Mon Sep 17 00:00:00 2001 From: jules Date: Tue, 1 May 2018 20:28:52 +0200 Subject: dataset build scripts --- scripts/builders/canny-dir.py | 40 +++++++++++++++++++++ scripts/builders/flow-dir.py | 75 ++++++++++++++++++++++++++++++++++++++++ scripts/builders/pair-dataset.pl | 55 +++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+) create mode 100644 
scripts/builders/canny-dir.py create mode 100644 scripts/builders/flow-dir.py create mode 100644 scripts/builders/pair-dataset.pl diff --git a/scripts/builders/canny-dir.py b/scripts/builders/canny-dir.py new file mode 100644 index 0000000..ae1bd62 --- /dev/null +++ b/scripts/builders/canny-dir.py @@ -0,0 +1,40 @@ +import os +from shutil import move, copyfile +from PIL import Image, ImageOps +from shutil import copyfile, rmtree +import numpy as np +import cv2 + +work_dir = "woodsized/" +render_dir = "woodcanny/" + +if os.path.exists(render_dir): + rmtree(render_dir) + +os.makedirs(render_dir) +os.makedirs(render_dir + "A/") +os.makedirs(render_dir + "A/train/") +os.makedirs(render_dir + "A/test/") +os.makedirs(render_dir + "A/val/") +os.makedirs(render_dir + "B/") +os.makedirs(render_dir + "B/train/") +os.makedirs(render_dir + "B/test/") +os.makedirs(render_dir + "B/val/") + +for i,fn in enumerate(sorted(os.listdir(work_dir))): + pil_image = Image.open(work_dir + fn).convert('RGB') + opencv_image = np.array(pil_image) + opencv_image = opencv_image[:, :, ::-1].copy() + opencv_image = cv2.GaussianBlur(opencv_image, (3,3), 1) + opencv_image = cv2.Canny(opencv_image, 100, 200) + ren = "frame_{:05d}.png".format(i) + if (i % 10) == 3: + wd = "test/" + elif (i % 10) == 6: + wd = "val/" + else: + wd = "train/" + cv2.imwrite(render_dir + "A/" + wd + ren, opencv_image) + copyfile(work_dir + fn, render_dir + "B/" + wd + ren) + + diff --git a/scripts/builders/flow-dir.py b/scripts/builders/flow-dir.py new file mode 100644 index 0000000..da35fbd --- /dev/null +++ b/scripts/builders/flow-dir.py @@ -0,0 +1,75 @@ +import os +import sys +from shutil import move, copyfile +from PIL import Image, ImageOps +from shutil import copyfile, rmtree +import numpy as np +import cv2 + +work_dir = "woodsized/" +render_dir = "woodflow/" + +if os.path.exists(render_dir): + rmtree(render_dir) + +os.makedirs(render_dir) +os.makedirs(render_dir + "A/") +os.makedirs(render_dir + "A/train/") +os.makedirs(render_dir + "A/test/") +os.makedirs(render_dir + "A/val/") +os.makedirs(render_dir + "B/") +os.makedirs(render_dir + "B/train/") +os.makedirs(render_dir + "B/test/") +os.makedirs(render_dir + "B/val/") + +hsv = [] + +for i,fn in enumerate(sorted(os.listdir(work_dir))): + # load image and convert to grayscale + pil_image = Image.open(work_dir + fn).convert('RGB') + im = np.array(pil_image) + im = im[:, :, ::-1].copy() + im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) + + # store first frame + if i == 0: + prev = im + hsv = np.zeros((256,256,3)) + hsv[...,1] = 255 + continue + + # compute optical flow + flow = cv2.calcOpticalFlowFarneback(prev, im, None, 0.5, 3, 15, 3, 5, 1.2, 0) + + # turn into magnitude/angle + mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1]) + + # store angle as hue + hsv[...,0] = ang * 180 / np.pi / 2 + + # store magnitude as lum + hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX) + + # convert this HSL to BGR + bgr = cv2.cvtColor(hsv.astype('uint8'), cv2.COLOR_HSV2BGR) + + ren = "frame_{:05d}.png".format(i) + if (i % 10) == 3: + wd = "test/" + elif (i % 10) == 6: + wd = "val/" + else: + wd = "train/" + + if i == 1: + prev_hsv = np.copy(hsv) + prev_bgr = np.copy(hsv) + continue + + cv2.imwrite(render_dir + "A/" + wd + ren, prev_bgr) + cv2.imwrite(render_dir + "B/" + wd + ren, bgr) + # copyfile(work_dir + fn, render_dir + "B/" + wd + ren) + prev = im + prev_hsv = hsv + prev_bgr = hsv + diff --git a/scripts/builders/pair-dataset.pl b/scripts/builders/pair-dataset.pl new file mode 100644 index 
0000000..06c40c0 --- /dev/null +++ b/scripts/builders/pair-dataset.pl @@ -0,0 +1,55 @@ +#!/usr/bin/perl + +use strict; + +our $images_dir = "/home/ubuntu/Desktop/wood/"; +our $thumbs_dir = "/home/ubuntu/Desktop/thumbs/wood/"; + +our $dt = 1; + +mkdir($thumbs_dir); +mkdir($thumbs_dir . "A/"); +mkdir($thumbs_dir . "B/"); +mkdir($thumbs_dir . "A/train/"); +mkdir($thumbs_dir . "B/train/"); +mkdir($thumbs_dir . "A/val/"); +mkdir($thumbs_dir . "B/val/"); +mkdir($thumbs_dir . "A/test/"); +mkdir($thumbs_dir . "B/test/"); + +our @files = (); + +opendir DIR, $images_dir; +while (readdir DIR) { + next if /^\./; + push(@files, $_); +} +closedir DIR; + +our @images = sort @files; + +my $count = scalar(@images) - $dt; + +my $i; my $x; my $y; my $dir; +for ($i = 0; $i < $count; $i++) { + my $id = $i; + if ( ($id % 7) == 3) { + $dir = "test/"; + } elsif ( ($id % 7) == 6) { + $dir = "val/"; + } else { + $dir = "train/"; + } + if ( $i && ($i % 1000) == 0) { + print($id . "...\n") + } + + my $a_frame = $images_dir . $images[$id]; + my $b_frame = $images_dir . $images[$id+$dt]; + my $fn = sprintf("frame_%05d.png", $i); + #print "$fn $x $y $a_frame\n"; + system("convert", $a_frame, "-resize", '256x256!', '-canny', '0x1+10%+30%', $thumbs_dir . "A/" . $dir . $fn); + system("convert", $b_frame, "-resize", '256x256!', $thumbs_dir . "B/" . $dir . $fn); +} +print $count . "\n"; + -- cgit v1.2.3-70-g09d2 From f8d0c6bd65a827be534d2d105917829fcb4a0f82 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Tue, 1 May 2018 21:35:30 +0200 Subject: fix flowdir script --- scripts/builders/flow-dir.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/builders/flow-dir.py b/scripts/builders/flow-dir.py index da35fbd..620ba32 100644 --- a/scripts/builders/flow-dir.py +++ b/scripts/builders/flow-dir.py @@ -63,13 +63,13 @@ for i,fn in enumerate(sorted(os.listdir(work_dir))): if i == 1: prev_hsv = np.copy(hsv) - prev_bgr = np.copy(hsv) + prev_bgr = np.copy(bgr) continue cv2.imwrite(render_dir + "A/" + wd + ren, prev_bgr) cv2.imwrite(render_dir + "B/" + wd + ren, bgr) # copyfile(work_dir + fn, render_dir + "B/" + wd + ren) prev = im - prev_hsv = hsv - prev_bgr = hsv + prev_hsv = np.copy(hsv) + prev_bgr = np.copy(bgr) -- cgit v1.2.3-70-g09d2 From 5edd1efbc7b3c02d16b23401cc47a88f50fdf4d5 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Tue, 1 May 2018 22:43:23 +0200 Subject: test canny? --- canny-cv.py | 29 ++++++++++++++++------------- test-canny.sh | 11 +++++++++++ 2 files changed, 27 insertions(+), 13 deletions(-) create mode 100644 test-canny.sh diff --git a/canny-cv.py b/canny-cv.py index df4526a..f8b5dc3 100644 --- a/canny-cv.py +++ b/canny-cv.py @@ -63,12 +63,14 @@ if __name__ == '__main__': img_path = model.get_image_paths() print('%04d: process image... 
%s' % (i, img_path)) ims = visualizer.save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio) + + im = visuals['fake_B'] + tmp_path = render_dir + "frame_{:05d}_tmp.png".format(i+1) + edges_path = render_dir + "frame_{:05d}.png".format(i+1) + render_path = render_dir + "ren_{:05d}.png".format(i+1) + if dataset.name() == 'RecursiveDatasetDataLoader': # print(visuals.keys()) - im = visuals['fake_B'] - tmp_path = render_dir + "frame_{:05d}_tmp.png".format(i+1) - edges_path = render_dir + "frame_{:05d}.png".format(i+1) - render_path = render_dir + "ren_{:05d}.png".format(i+1) # s = 256 # p = 8 # im = imresize(im, (s-p, s-p), interp='bicubic') @@ -92,15 +94,16 @@ if __name__ == '__main__': image_pil.save(tmp_path) os.rename(tmp_path, render_path) - image_pil = Image.fromarray(im, mode='RGB') - image_pil = crop_image(image_pil, (0.50, 0.50), 0.5) - im = np.asarray(image_pil).astype('uint8') - #print(im.shape, im.dtype) - opencv_image = im[:, :, ::-1].copy() - opencv_image = cv2.GaussianBlur(opencv_image, (blur,blur), sigma) - opencv_image = cv2.Canny(opencv_image, canny_lo, canny_hi) - cv2.imwrite(tmp_path, opencv_image) - os.rename(tmp_path, edges_path) + image_pil = Image.fromarray(im, mode='RGB') + image_pil = crop_image(image_pil, (0.50, 0.50), 0.5) + im = np.asarray(image_pil).astype('uint8') + #print(im.shape, im.dtype) + opencv_image = im[:, :, ::-1].copy() + opencv_image = cv2.GaussianBlur(opencv_image, (blur,blur), sigma) + opencv_image = cv2.Canny(opencv_image, canny_lo, canny_hi) + cv2.imwrite(tmp_path, opencv_image) + os.rename(tmp_path, edges_path) + webpage.save() diff --git a/test-canny.sh b/test-canny.sh new file mode 100644 index 0000000..57b56b6 --- /dev/null +++ b/test-canny.sh @@ -0,0 +1,11 @@ +python canny-cv.py \ + --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ + --name woodcanny \ + --loadSize 256 \ + --fineSize 256 \ + --how_many 200 \ + --model test \ + --which_model_netG unet_256 \ + --which_direction AtoB \ + --dataset_mode aligned \ + --norm batch -- cgit v1.2.3-70-g09d2 From 70cbf455566a2302353ce1951154f97c8007c345 Mon Sep 17 00:00:00 2001 From: jules Date: Tue, 1 May 2018 22:49:30 +0200 Subject: woodflowfixed --- run-flow.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run-flow.sh b/run-flow.sh index 7b7ac4f..858be31 100755 --- a/run-flow.sh +++ b/run-flow.sh @@ -1,4 +1,4 @@ -dataset="woodflow" +dataset="woodflowfixed" #/home/lens/Desktop/dataset-random.pl @@ -6,7 +6,7 @@ dataset="woodflow" # --fold_A "$HOME/Desktop/thumbs/$dataset/A" \ # --fold_B "$HOME/Desktop/thumbs/$dataset/B" \ # --fold_AB "./datasets/$dataset/" - +# python train.py \ --dataroot "./datasets/$dataset" \ --name "$dataset" \ -- cgit v1.2.3-70-g09d2 From 06e7bea3951a223fff7d0432e5489e4b672d6428 Mon Sep 17 00:00:00 2001 From: jules Date: Wed, 9 May 2018 21:23:10 +0200 Subject: run flow dataset --- .gitignore | 2 ++ run-flow.sh | 7 +++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 7290625..ef552b4 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,5 @@ test/data/legacy_serialized.pt *~ .idea recursive/ +*.swp + diff --git a/run-flow.sh b/run-flow.sh index 858be31..3f650dc 100755 --- a/run-flow.sh +++ b/run-flow.sh @@ -6,7 +6,7 @@ dataset="woodflowfixed" # --fold_A "$HOME/Desktop/thumbs/$dataset/A" \ # --fold_B "$HOME/Desktop/thumbs/$dataset/B" \ # --fold_AB "./datasets/$dataset/" -# + python train.py \ --dataroot "./datasets/$dataset" \ --name "$dataset" \ @@ -17,11 +17,10 @@ python train.py \ 
--which_direction AtoB \ --lambda_B 100 \ --dataset_mode aligned \ + --continue_train \ + --which_epoch latest \ --no_lsgan --norm batch --pool_size 0 -# --continue_train \ -# --which_epoch latest \ - #python canny-cv.py \ # --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \ # --name woodcanny \ -- cgit v1.2.3-70-g09d2
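A note on the crop_image helper added to util/util.py in the first patch above: new_size is the original size divided by scale_factor, so values greater than 1 zoom in on the centre of the image, while values of 1 or less request a box at least as large as the image and the clamping returns the full frame. A minimal usage sketch of both cases follows; the frame path and the 256x256 input size are assumptions for illustration, not part of the patches.

    from PIL import Image
    from util.util import crop_image

    # Hypothetical 256x256 input frame; any RGB image behaves the same way.
    img = Image.open("frame_00100.png").convert('RGB')

    # scale_factor > 1 zooms in: keep the central (size / scale_factor) box.
    center_crop = crop_image(img, (0.5, 0.5), 2)    # 128x128 box around the centre

    # scale_factor <= 1 asks for a box at least as large as the image, so the
    # crop is clamped to the image bounds and the whole frame comes back.
    # This matches the crop_image(image_pil, (0.50, 0.50), 0.5) call in canny-cv.py.
    full_frame = crop_image(img, (0.5, 0.5), 0.5)

    print(center_crop.size, full_frame.size)    # (128, 128) (256, 256)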