-rw-r--r--  .canny-single.py.swp  bin 0 -> 12288 bytes
-rw-r--r--  canny-cv.py           129
-rw-r--r--  canny-dir.py          31
-rw-r--r--  canny-single.py       36
-rwxr-xr-x  canny-single.sh       14
-rw-r--r--  canny.py              111
-rwxr-xr-x  recursive-canny.sh    14
-rwxr-xr-x  recursive.sh          6
-rwxr-xr-x  run-canny.sh          91
-rwxr-xr-x  run-flow.sh           37
-rwxr-xr-x  run.sh                33
-rw-r--r--  test.py               3
-rwxr-xr-x  test.sh               38
-rw-r--r--  util/util.py          20
14 files changed, 543 insertions(+), 20 deletions(-)
diff --git a/.canny-single.py.swp b/.canny-single.py.swp
new file mode 100644
index 0000000..9c12044
--- /dev/null
+++ b/.canny-single.py.swp
Binary files differ
diff --git a/canny-cv.py b/canny-cv.py
new file mode 100644
index 0000000..df4526a
--- /dev/null
+++ b/canny-cv.py
@@ -0,0 +1,129 @@
+import os
+from options.test_options import TestOptions
+from data import CreateRecursiveDataLoader
+from models import create_model
+from util.visualizer import Visualizer
+from util.util import mkdirs, crop_image
+from util import html
+from shutil import move, copyfile, rmtree
+from PIL import Image, ImageOps
+from skimage.transform import resize
+from scipy.misc import imresize
+import numpy as np
+import cv2
+import time
+
+import subprocess
+from time import sleep
+
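+# Edge-feedback parameters: Gaussian blur kernel size and sigma, Canny thresholds,
+# and frame-blend weights (frac_a/frac_b are currently only used in the output filename).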
+blur = 3
+sigma = 0
+canny_lo = 10
+canny_hi = 220
+frac_a = 0.99
+frac_b = 1 - frac_a
+
+if __name__ == '__main__':
+ opt = TestOptions().parse()
+ opt.nThreads = 1 # test code only supports nThreads = 1
+ opt.batchSize = 1 # test code only supports batchSize = 1
+ opt.serial_batches = True # no shuffle
+ opt.no_flip = True # no flip
+ opt.experiment = opt.start_img.split("/")[-1].split(".")[0]
+
+ render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/"
+
+ if os.path.exists(render_dir):
+ rmtree(render_dir)
+ mkdirs(render_dir)
+
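+ # Seed the render directory: ImageMagick writes a Canny edge map of the start image as frame_00000.png.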
+ cmd = ("convert", opt.start_img, '-canny', '0x1+10%+30%', render_dir + "frame_00000.png")
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, error = process.communicate()
+
+ #copyfile(opt.start_img, render_dir + "frame_00000.png")
+
+ data_loader = CreateRecursiveDataLoader(opt)
+ dataset = data_loader.load_data()
+ ds = dataset.dataset
+ model = create_model(opt)
+ visualizer = Visualizer(opt)
+ # create website
+ web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
+ webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
+ # test
+ last_im = None
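+ # Recursive render loop: each generated frame is saved as ren_*.png, and a Canny edge map of a
+ # center crop is written as frame_*.png, which the recursive data loader presumably picks up as the next input.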
+ for i, data in enumerate(data_loader):
+ if i >= opt.how_many:
+ break
+ model.set_input(data)
+ model.test()
+ visuals = model.get_current_visuals()
+ img_path = model.get_image_paths()
+ print('%04d: process image... %s' % (i, img_path))
+ ims = visualizer.save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio)
+ if dataset.name() == 'RecursiveDatasetDataLoader':
+ # print(visuals.keys())
+ im = visuals['fake_B']
+ tmp_path = render_dir + "frame_{:05d}_tmp.png".format(i+1)
+ edges_path = render_dir + "frame_{:05d}.png".format(i+1)
+ render_path = render_dir + "ren_{:05d}.png".format(i+1)
+ # s = 256
+ # p = 8
+ # im = imresize(im, (s-p, s-p), interp='bicubic')
+ # image_pil = Image.fromarray(im)
+ # image_pil = ImageOps.expand(image_pil, p)
+ # image_pil.save(save_path)
+ # copyfile(save_path, final_path)
+ if last_im is not None:
+ tmp_im = im.copy()
+ #array_a = np.multiply(im.astype('float64'), frac_a)
+ #array_b = np.multiply(last_im.astype('float64'), frac_b)
+ #im = np.add(array_a, array_b).astype('uint8')
+ # print(im.shape, im.dtype)
+ last_im = np.roll(tmp_im, 1, axis=1)
+ else:
+ last_im = im.copy().astype('uint8')
+ tmp_im = im.copy().astype('uint8')
+ #print(im.shape, im.dtype)
+
+ image_pil = Image.fromarray(tmp_im, mode='RGB')
+ image_pil.save(tmp_path)
+ os.rename(tmp_path, render_path)
+
+ image_pil = Image.fromarray(im, mode='RGB')
+ image_pil = crop_image(image_pil, (0.50, 0.50), 0.5)
+ im = np.asarray(image_pil).astype('uint8')
+ #print(im.shape, im.dtype)
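+ # PIL arrays are RGB; reverse the channel order to BGR for OpenCV, then blur and run Canny.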
+ opencv_image = im[:, :, ::-1].copy()
+ opencv_image = cv2.GaussianBlur(opencv_image, (blur,blur), sigma)
+ opencv_image = cv2.Canny(opencv_image, canny_lo, canny_hi)
+ cv2.imwrite(tmp_path, opencv_image)
+ os.rename(tmp_path, edges_path)
+
+ webpage.save()
+
+ os.remove(render_dir + "frame_00000.png")
+
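+ # Timestamp in minutes modulo one year (525600 minutes), appended to the video filename.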
+ t = time.time()
+ t /= 60
+ t %= 525600
+ video_fn = "{}_{}_canmix_{}frame_{}mix_{}blur_{}sigma_{}lo_{}hi_{}.mp4".format(
+ opt.name, opt.experiment,
+ opt.how_many, frac_a,
+ blur, sigma, canny_lo, canny_hi,
+ int(t))
+
+ cmd = ("/usr/bin/ffmpeg", "-i", render_dir + "ren_%05d.png", "-y", "-c:v", "libx264", "-vf", "fps=30", "-pix_fmt", "yuv420p", render_dir + video_fn)
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, error = process.communicate()
+
+ print("________")
+
+ cmd = ("scp", render_dir + video_fn, "jules@asdf.us:asdf/neural/")
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, error = process.communicate()
+
+ print("https://asdf.us/neural/" + video_fn)
+
diff --git a/canny-dir.py b/canny-dir.py
new file mode 100644
index 0000000..8a671dd
--- /dev/null
+++ b/canny-dir.py
@@ -0,0 +1,31 @@
+import os
+from options.test_options import TestOptions
+from shutil import move, copyfile, rmtree
+from util.util import mkdirs
+from PIL import Image, ImageOps
+import numpy as np
+import cv2
+
+import subprocess
+from time import sleep
+
+if __name__ == '__main__':
+ opt = TestOptions().parse()
+ opt.nThreads = 1 # test code only supports nThreads = 1
+ opt.batchSize = 1 # test code only supports batchSize = 1
+ opt.serial_batches = True # no shuffle
+ opt.no_flip = True # no flip
+ opt.experiment = opt.start_img.split("/")[-1].split(".")[0]
+
+ render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/"
+
+ if os.path.exists(render_dir):
+ rmtree(render_dir)
+ mkdirs(render_dir)
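+ # Seed frame: blur the start image and write its Canny edge map as frame_00000.png.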
+ pil_image = Image.open(opt.start_img).convert('RGB')
+ opencv_image = np.array(pil_image)
+ opencv_image = opencv_image[:, :, ::-1].copy()
+ opencv_image = cv2.GaussianBlur(opencv_image, (3,3), 1)
+ opencv_image = cv2.Canny(opencv_image, 100, 200)
+ cv2.imwrite(render_dir + "frame_00000.png", opencv_image)
+
diff --git a/canny-single.py b/canny-single.py
new file mode 100644
index 0000000..749067a
--- /dev/null
+++ b/canny-single.py
@@ -0,0 +1,36 @@
+import os
+from options.test_options import TestOptions
+from shutil import move, copyfile, rmtree
+from util.util import mkdirs
+from PIL import Image, ImageOps
+import numpy as np
+import cv2
+
+import subprocess
+from time import sleep
+
+if __name__ == '__main__':
+ opt = TestOptions().parse()
+ opt.nThreads = 1 # test code only supports nThreads = 1
+ opt.batchSize = 1 # test code only supports batchSize = 1
+ opt.serial_batches = True # no shuffle
+ opt.no_flip = True # no flip
+ opt.experiment = opt.start_img
+
+ render_dir = opt.results_dir + opt.experiment + "/"
+
+ if os.path.exists(render_dir):
+ rmtree(render_dir)
+ mkdirs(render_dir)
+ i = 0
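+ # Convert every image in the start directory into a Canny edge map, numbered sequentially.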
+ for f in sorted(os.listdir(opt.start_img)):
+ path = os.path.join(opt.start_img, f)
+ if not os.path.isfile(path):
+ continue
+ pil_image = Image.open(path).convert('RGB')
+ opencv_image = np.array(pil_image)
+ opencv_image = opencv_image[:, :, ::-1].copy()
+ opencv_image = cv2.GaussianBlur(opencv_image, (3,3), 1)
+ opencv_image = cv2.Canny(opencv_image, 100, 200)
+ cv2.imwrite(render_dir + "frame_{:04d}.png".format(i), opencv_image)
+ i += 1
+
diff --git a/canny-single.sh b/canny-single.sh
new file mode 100755
index 0000000..7ad010f
--- /dev/null
+++ b/canny-single.sh
@@ -0,0 +1,14 @@
+python canny-single.py \
+ --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \
+ --name wood \
+ --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \
+ --loadSize 256 \
+ --fineSize 256 \
+ --how_many 30 \
+ --model test \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --dataset_mode recursive \
+ --norm batch
+
+# --aspect_ratio 1.777777 \
diff --git a/canny.py b/canny.py
new file mode 100644
index 0000000..b8dc6e8
--- /dev/null
+++ b/canny.py
@@ -0,0 +1,111 @@
+import os
+from options.test_options import TestOptions
+from data import CreateRecursiveDataLoader
+from models import create_model
+from util.visualizer import Visualizer
+from util.util import mkdirs
+from util import html
+from shutil import move, copyfile, rmtree
+from PIL import Image, ImageOps
+from skimage.transform import resize
+from scipy.misc import imresize
+import time
+import numpy as np
+import cv2
+
+import subprocess
+from time import sleep
+
+if __name__ == '__main__':
+ opt = TestOptions().parse()
+ opt.nThreads = 1 # test code only supports nThreads = 1
+ opt.batchSize = 1 # test code only supports batchSize = 1
+ opt.serial_batches = True # no shuffle
+ opt.no_flip = True # no flip
+ opt.experiment = opt.start_img.split("/")[-1].split(".")[0]
+
+ render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/"
+
+ if os.path.exists(render_dir):
+ rmtree(render_dir)
+ mkdirs(render_dir)
+
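+ # Seed the render directory: ImageMagick writes a Canny edge map of the start image as frame_00000.png.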
+ cmd = ("convert", opt.start_img, '-canny', '0x1+10%+30%', render_dir + "frame_00000.png")
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, error = process.communicate()
+
+ data_loader = CreateRecursiveDataLoader(opt)
+ dataset = data_loader.load_data()
+ ds = dataset.dataset
+ model = create_model(opt)
+ visualizer = Visualizer(opt)
+ # create website
+ web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
+ webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
+ # test
+ last_im = None
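+ # Recursive render loop: save each generated frame as ren_*.png, then Canny-filter it into
+ # frame_*.png, which the recursive data loader presumably picks up as the next input.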
+ for i, data in enumerate(data_loader):
+ if i >= opt.how_many:
+ break
+ model.set_input(data)
+ model.test()
+ visuals = model.get_current_visuals()
+ img_path = model.get_image_paths()
+ print('%04d: process image... %s' % (i, img_path))
+ ims = visualizer.save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio)
+ if dataset.name() == 'RecursiveDatasetDataLoader':
+ # print(visuals.keys())
+ im = visuals['fake_B']
+ tmp_path = render_dir + "frame_{:05d}_tmp.png".format(i+1)
+ edges_path = render_dir + "frame_{:05d}.png".format(i+1)
+ render_path = render_dir + "ren_{:05d}.png".format(i+1)
+ # s = 256
+ # p = 8
+ # im = imresize(im, (s-p, s-p), interp='bicubic')
+ # image_pil = Image.fromarray(im)
+ # image_pil = ImageOps.expand(image_pil, p)
+ # image_pil.save(save_path)
+ # copyfile(save_path, final_path)
+ #if last_im is not None:
+ # frac_a = 999/1000
+ # frac_b = 1/1000
+ # tmp_im = im.copy()
+ # array_a = np.multiply(im, frac_a)
+ # array_b = np.multiply(last_im, frac_b)
+ # # im = np.add(array_a, array_b).astype('int8')
+ # # print(im.shape, im.dtype)
+ # last_im = np.roll(tmp_im, 1, axis=1)
+ #else:
+ # last_im = im.copy()
+ # print(im.shape, im.dtype)
+ image_pil = Image.fromarray(im, mode='RGB')
+ image_pil.save(tmp_path)
+ os.rename(tmp_path, render_path)
+
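+ # Run ImageMagick Canny on the rendered frame to produce the next edge frame.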
+ cmd = ("convert", render_path, '-canny', '0x1+10%+30%', tmp_path)
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, error = process.communicate()
+
+ os.rename(tmp_path, edges_path)
+
+ webpage.save()
+
+ os.remove(render_dir + "frame_00000.png")
+
+ t = time.time()
+ t /= 60
+ t %= 525600
+ video_fn = opt.name + "_" + opt.experiment + "_canny_" + str(opt.how_many) + "_" + str(int(t)) + ".mp4"
+
+ cmd = ("/usr/bin/ffmpeg", "-i", render_dir + "ren_%05d.png", "-y", "-c:v", "libx264", "-vf", "fps=30", "-pix_fmt", "yuv420p", render_dir + video_fn)
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, error = process.communicate()
+
+ print("________")
+
+ cmd = ("scp", render_dir + video_fn, "jules@asdf.us:asdf/neural/")
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, error = process.communicate()
+
+ print("https://asdf.us/neural/" + video_fn)
diff --git a/recursive-canny.sh b/recursive-canny.sh
new file mode 100755
index 0000000..0150438
--- /dev/null
+++ b/recursive-canny.sh
@@ -0,0 +1,14 @@
+python canny-cv.py \
+ --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \
+ --name woodcanny \
+ --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \
+ --loadSize 256 \
+ --fineSize 256 \
+ --how_many 200 \
+ --model test \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --dataset_mode recursive \
+ --norm batch
+
+# --aspect_ratio 1.777777 \
diff --git a/recursive.sh b/recursive.sh
index daffba1..6ccc60d 100755
--- a/recursive.sh
+++ b/recursive.sh
@@ -1,7 +1,7 @@
python test.py \
- --dataroot /home/lens/Desktop/thumbs/woodscaled/A/train/ \
- --name woodscaled_pix2pix \
- --start_img /home/lens/Desktop/thumbs/woodscaled/A/train/frame_1002.png \
+ --dataroot /home/ubuntu/Desktop/thumbs/wood/A/train/ \
+ --name wood \
+ --start_img /home/ubuntu/Desktop/thumbs/wood/A/train/frame_1002.png \
--how_many 2000 \
--model test \
--which_model_netG unet_256 \
diff --git a/run-canny.sh b/run-canny.sh
new file mode 100755
index 0000000..aef1118
--- /dev/null
+++ b/run-canny.sh
@@ -0,0 +1,91 @@
+dataset="woodcanny"
+
+#/home/lens/Desktop/dataset-random.pl
+
+#python datasets/combine_A_and_B.py \
+# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \
+# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \
+# --fold_AB "./datasets/$dataset/"
+
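+# Alternate between training on the aligned dataset and rendering a recursive canny sequence (three rounds).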
+python train.py \
+ --dataroot "./datasets/$dataset" \
+ --name "$dataset" \
+ --model pix2pix \
+ --loadSize 276 \
+ --fineSize 256 \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --lambda_B 100 \
+ --dataset_mode aligned \
+ --which_epoch latest \
+ --continue_train \
+ --no_lsgan --norm batch --pool_size 0
+
+python canny-cv.py \
+ --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \
+ --name woodcanny \
+ --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \
+ --loadSize 256 \
+ --fineSize 256 \
+ --how_many 200 \
+ --model test \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --dataset_mode recursive \
+ --norm batch
+
+python train.py \
+ --dataroot "./datasets/$dataset" \
+ --name "$dataset" \
+ --model pix2pix \
+ --loadSize 276 \
+ --fineSize 256 \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --lambda_B 100 \
+ --dataset_mode aligned \
+ --which_epoch latest \
+ --continue_train \
+ --no_lsgan --norm batch --pool_size 0
+
+python canny-cv.py \
+ --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \
+ --name woodcanny \
+ --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \
+ --loadSize 256 \
+ --fineSize 256 \
+ --how_many 200 \
+ --model test \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --dataset_mode recursive \
+ --norm batch
+
+python train.py \
+ --dataroot "./datasets/$dataset" \
+ --name "$dataset" \
+ --model pix2pix \
+ --loadSize 276 \
+ --fineSize 256 \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --lambda_B 100 \
+ --dataset_mode aligned \
+ --which_epoch latest \
+ --continue_train \
+ --no_lsgan --norm batch --pool_size 0
+
+python canny-cv.py \
+ --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \
+ --name woodcanny \
+ --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \
+ --loadSize 256 \
+ --fineSize 256 \
+ --how_many 200 \
+ --model test \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --dataset_mode recursive \
+ --norm batch
+
+# --aspect_ratio 1.777777 \
diff --git a/run-flow.sh b/run-flow.sh
new file mode 100755
index 0000000..7b7ac4f
--- /dev/null
+++ b/run-flow.sh
@@ -0,0 +1,37 @@
+dataset="woodflow"
+
+#/home/lens/Desktop/dataset-random.pl
+
+#python datasets/combine_A_and_B.py \
+# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \
+# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \
+# --fold_AB "./datasets/$dataset/"
+
+python train.py \
+ --dataroot "./datasets/$dataset" \
+ --name "$dataset" \
+ --model pix2pix \
+ --loadSize 276 \
+ --fineSize 256 \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --lambda_B 100 \
+ --dataset_mode aligned \
+ --no_lsgan --norm batch --pool_size 0
+
+# --continue_train \
+# --which_epoch latest \
+
+#python canny-cv.py \
+# --dataroot /home/ubuntu/Desktop/thumbs/wood/B/train/ \
+# --name woodcanny \
+# --start_img /home/ubuntu/Desktop/thumbs/wood/B/train/frame_00100.png \
+# --loadSize 256 \
+# --fineSize 256 \
+# --how_many 200 \
+# --model test \
+# --which_model_netG unet_256 \
+# --which_direction AtoB \
+# --dataset_mode recursive \
+# --norm batch
+
diff --git a/run.sh b/run.sh
index 43d14e2..c02972d 100755
--- a/run.sh
+++ b/run.sh
@@ -1,24 +1,25 @@
-dataset="randomcrops5k"
+dataset="woodcanny"
#/home/lens/Desktop/dataset-random.pl
-#
+
#python datasets/combine_A_and_B.py \
-# --fold_A "/home/lens/Desktop/thumbs/$dataset/A" \
-# --fold_B "/home/lens/Desktop/thumbs/$dataset/B" \
+# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \
+# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \
# --fold_AB "./datasets/$dataset/"
-# python train.py \
-# --dataroot "./datasets/$dataset" \
-# --name "$dataset" \
-# --model pix2pix \
-# --loadSize 256 \
-# --fineSize 256 \
-# --which_model_netG unet_256 \
-# --which_direction AtoB \
-# --lambda_B 100 \
-# --dataset_mode aligned \
-# --no_lsgan --norm batch --pool_size 0 \
-# --continue_train
+python train.py \
+ --dataroot "./datasets/$dataset" \
+ --name "$dataset" \
+ --model pix2pix \
+ --loadSize 276 \
+ --fineSize 256 \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --lambda_B 100 \
+ --dataset_mode aligned \
+ --which_epoch latest \
+ --continue_train \
+ --no_lsgan --norm batch --pool_size 0
python test.py \
--dataroot "/home/lens/Desktop/thumbs/$dataset/A/train/" \
diff --git a/test.py b/test.py
index a53932d..1f82875 100644
--- a/test.py
+++ b/test.py
@@ -25,7 +25,8 @@ if __name__ == '__main__':
render_dir = opt.results_dir + opt.name + "/exp:" + opt.experiment + "/"
- rmtree(render_dir)
+ if os.path.exists(render_dir):
+ rmtree(render_dir)
mkdirs(render_dir)
copyfile(opt.start_img, render_dir + "frame_00000.png")
diff --git a/test.sh b/test.sh
new file mode 100755
index 0000000..c25c987
--- /dev/null
+++ b/test.sh
@@ -0,0 +1,38 @@
+dataset="wood"
+
+#/home/lens/Desktop/dataset-random.pl
+#
+#python datasets/combine_A_and_B.py \
+# --fold_A "$HOME/Desktop/thumbs/$dataset/A" \
+# --fold_B "$HOME/Desktop/thumbs/$dataset/B" \
+# --fold_AB "./datasets/$dataset/"
+
+# python train.py \
+# --dataroot "./datasets/$dataset" \
+# --name "$dataset" \
+# --model pix2pix \
+# --loadSize 256 \
+# --fineSize 256 \
+# --which_model_netG unet_256 \
+# --which_direction AtoB \
+# --lambda_B 100 \
+# --dataset_mode aligned \
+# --which_epoch latest \
+# --no_lsgan --norm batch --pool_size 0
+# --continue_train \
+
+python test.py \
+ --dataroot "/home/lens/Desktop/thumbs/$dataset/A/train/" \
+ --name "$dataset" \
+ --start_img "/home/lens/Desktop/thumbs/$dataset/A/train/frame_1008.png" \
+ --how_many 1000 \
+ --model test \
+ --aspect_ratio 1.777777 \
+ --which_model_netG unet_256 \
+ --which_direction AtoB \
+ --dataset_mode test \
+ --loadSize 256 \
+ --fineSize 256 \
+ --norm batch
+
+
diff --git a/util/util.py b/util/util.py
index 7a452a6..1c03715 100644
--- a/util/util.py
+++ b/util/util.py
@@ -54,3 +54,23 @@ def mkdirs(paths):
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
+
+def crop_image(img, xy, scale_factor):
+ '''Crop the image around the relative center point xy.
+
+ Inputs:
+ -------
+ img: image opened with PIL.Image
+ xy: tuple with the relative (x, y) position of the center of the crop;
+ x and y must be between 0 and 1
+ scale_factor: ratio between the original image's size and the cropped image's size
+ '''
+ center = (img.size[0] * xy[0], img.size[1] * xy[1])
+ new_size = (img.size[0] / scale_factor, img.size[1] / scale_factor)
+ left = max(0, int(center[0] - new_size[0] / 2))
+ right = min(img.size[0], int(center[0] + new_size[0] / 2))
+ upper = max(0, int(center[1] - new_size[1] / 2))
+ lower = min(img.size[1], int(center[1] + new_size[1] / 2))
+ cropped_img = img.crop((left, upper, right, lower))
+ return cropped_img
+