 crop-equirectangular-2048.py | 80
 train_1024.sh                | 11
 2 files changed, 91 insertions, 0 deletions
diff --git a/crop-equirectangular-2048.py b/crop-equirectangular-2048.py
new file mode 100644
index 0000000..77e57e3
--- /dev/null
+++ b/crop-equirectangular-2048.py
@@ -0,0 +1,80 @@
+import os
+import glob
+import argparse
+from shutil import rmtree
+from PIL import Image
+from multiprocessing import Pool, cpu_count
+from dotenv import load_dotenv, find_dotenv
+import subprocess
+load_dotenv(find_dotenv())
+
+# This script generates frames from a 360 equirectangular video and crops out the bottom third.
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--folder', default="./sequences/venice_360/equi_4096/")
+parser.add_argument('--label', default="venice_360")
+parser.add_argument('--folder_id', type=int, required=True)
+parser.add_argument('--overlap', type=float, default=0.5)
+parser.add_argument('--clobber', action='store_false')
+opt = parser.parse_args()
+
+src_width = 4096
+src_height = 2048
+
+crop_width = 4096
+crop_height = 1670
+
+crop_dim = (0, 0, crop_width, crop_height,)
+
+label_0 = opt.label + "_equi_2048"
+label_1 = opt.label + "_equi_1024"
+labels = [label_0, label_1]
+path_0 = os.path.join("sequences", label_0)
+path_1 = os.path.join("sequences", label_1)
+
+if opt.clobber:
+    if os.path.exists(path_0):
+        rmtree(path_0)
+    if os.path.exists(path_1):
+        rmtree(path_1)
+
+os.makedirs(path_0)
+os.makedirs(path_1)
+
+dataset = []
+for i, fn in enumerate(sorted(glob.glob(os.path.join(opt.folder, '*.png')))):
+    out_fn = "frame_{:05d}.png".format(i + 1)
+    if not opt.clobber and os.path.exists(os.path.join(path_1, out_fn)):
+        continue
+    dataset.append((i, fn,))
+
+def build_thumbnail(i, fn):
+    out_fn = "frame_{:05d}.png".format(i + 1)
+    if (i % 100) == 0:
+        print("{}...".format(i))
+
+    image = Image.open(fn).crop(crop_dim)
+    image.resize((2048, 1024,), Image.ANTIALIAS).save(os.path.join(path_0, out_fn))
+    image.resize((1024, 512,), Image.ANTIALIAS).save(os.path.join(path_1, out_fn))
+
+chunksize = 3
+with Pool(processes=cpu_count()) as pool:
+    pool.starmap(build_thumbnail, dataset, chunksize)
+
+if opt.folder_id > 0:
+    endpoint = os.getenv('API_REMOTE') + '/api/file/'
+    for label in labels:
+        subprocess.call([
+            "curl",
+            "-X", "POST",
+            "-d", "folder_id={}".format(opt.folder_id),
+            "-d", "module=pix2pixhd",
+            "-d", "name={}.mov".format(label),
+            "-d", "url=https://s3.amazonaws.com/i.asdf.us/cortex/lens/data/{}/{}.mov".format(opt.folder_id, label),
+            "-d", "dataset={}".format(label),
+            "-d", "activity=splice",
+            "-d", "generated=0",
+            "-d", "processed=1",
+            "-d", "datatype=video",
+            endpoint
+        ])
diff --git a/train_1024.sh b/train_1024.sh
new file mode 100644
index 0000000..b0d3321
--- /dev/null
+++ b/train_1024.sh
@@ -0,0 +1,11 @@
+dataset="venice_360_2048"
+module="pix2pixhd"
+folder_id=33
+curl -X POST -d folder_id=$folder_id -d module=$module -d name=$dataset.mp4 -d url=https://s3.amazonaws.com/i.asdf.us/cortex/lens/data/$folder_id/$dataset.mp4 -d dataset=$dataset -d activity=splice -d generated=0 -d processed=1 -d datatype=video https://lens.neural.garden/api/file/ 
+
+# frame 12600 -> 14098
+
+dataset=""
+
+python train.py --dataroot "./datasets/${dataset}/" --name "${dataset}_1024" --label_nc 0 --no_instance --netG local --ngf 32 --num_D 3 --load_pretrain checkpoints/label2city_512p/ --niter_fix_global 20 --resize_or_crop crop --fineSize 1024
+
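Usage note (not part of the commit): a minimal sketch of how crop-equirectangular-2048.py might be invoked, assuming 4096x2048 equirectangular PNG frames already exist under the given --folder. The script writes cropped 2048x1024 and 1024x512 copies into sequences/<label>_equi_2048 and sequences/<label>_equi_1024, and when --folder_id is positive it POSTs <label>.mov metadata to the API at $API_REMOTE, so API_REMOTE must be set in the .env for that final step to succeed. The folder path and folder_id below are example values taken from the script's defaults and train_1024.sh; also note that --clobber is declared with action='store_false', so output directories are wiped by default and passing --clobber actually skips the wipe.

    python crop-equirectangular-2048.py \
        --folder ./sequences/venice_360/equi_4096/ \
        --label venice_360 \
        --folder_id 33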
