import os
import glob
import argparse
import subprocess
from shutil import rmtree
from multiprocessing import Pool, cpu_count

from PIL import Image
from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())

# This script generates fixed-aspect-ratio crops from an equirectangular 360 video.
# It was written to produce three overlapping 3:1 sequences (identified by "--label");
# the two side crops are currently commented out, so only the centre sequence is generated.
# The default overlap is 24 degrees (equivalent to 1/6 of the 3:1 output image).
# Setting a higher overlap gives each crop a taller vertical FOV.

parser = argparse.ArgumentParser()
parser.add_argument('--folder', default="./sequences/venice_360/equi")
parser.add_argument('--label', required=True)
parser.add_argument('--vertical_offset', type=int, default=983)
parser.add_argument('--folder_id', type=int, required=True)
parser.add_argument('--overlap', type=float, default=0.5)
# Clobbering (delete and regenerate the output folder) is the default;
# pass --no_clobber to keep existing frames and resume an interrupted run.
parser.add_argument('--no_clobber', dest='clobber', action='store_false')
opt = parser.parse_args()

# Source equirectangular frame size.
src_width = 4096
src_height = 2048
# The canvas holds two copies of the frame side by side so a crop window can
# start anywhere and still wrap cleanly past the 360-degree seam.
canvas_width = 2 * src_width

count = 3                     # number of crops across the panorama
output_aspect = 3             # each output image is 3:1
overall_aspect = count * output_aspect
overlapped_aspect = overall_aspect - count * opt.overlap
crop_width = src_width * output_aspect / overlapped_aspect
crop_height = crop_width / output_aspect

# Crop centres for the three-sequence layout (side crops currently disabled).
# c0 = 7/6 * src_width
c1 = 3/6 * src_width
# c2 = 5/6 * src_width
# x0 = c0 - crop_width / 2
x1 = c1 - crop_width / 2
# x2 = c2 - crop_width / 2
y0 = opt.vertical_offset - crop_height / 2
# p0 = (x0, y0, x0 + crop_width, y0 + crop_height,)
# p1 = (x1, y0, x1 + crop_width, y0 + crop_height,)
# p2 = (x2, y0, x2 + crop_width, y0 + crop_height,)

# label_0 = opt.label + '_a'
label_1 = opt.label
# label_2 = opt.label + '_c'
# labels = [label_0, label_1, label_2]
labels = [label_1]

# path_0 = os.path.join("sequences", label_0)
path_1 = os.path.join("sequences", label_1)
# path_2 = os.path.join("sequences", label_2)

if opt.clobber:
    # if os.path.exists(path_0):
    #     rmtree(path_0)
    if os.path.exists(path_1):
        rmtree(path_1)
    # if os.path.exists(path_2):
    #     rmtree(path_2)

# os.makedirs(path_0, exist_ok=True)
os.makedirs(path_1, exist_ok=True)
# os.makedirs(path_2, exist_ok=True)

# Build the work list, skipping frames that already exist when resuming.
dataset = []
for i, fn in enumerate(sorted(glob.glob(os.path.join(opt.folder, '*.png')))):
    out_fn = "frame_{:05d}.png".format(i + 1)
    if not opt.clobber and os.path.exists(os.path.join(path_1, out_fn)):
        continue
    dataset.append((i, fn,))


def build_thumbnail(i, fn):
    out_fn = "frame_{:05d}.png".format(i + 1)
    if (i % 100) == 0:
        print("{}...".format(i))
    canvas = Image.new('RGB', (canvas_width, src_height,))
    image = Image.open(fn)
    canvas.paste(image, (0, 0))
    canvas.paste(image, (src_width, 0))
    # Pan the crop window one pixel to the right per frame, wrapping after a full turn.
    ii = i % src_width
    p1 = (x1 + ii, y0, x1 + crop_width + ii, y0 + crop_height,)
    # canvas.crop(p0).resize((1024, 512,), Image.LANCZOS).save(os.path.join(path_0, out_fn))
    canvas.crop(p1).resize((1024, 512,), Image.LANCZOS).save(os.path.join(path_1, out_fn))
    # canvas.crop(p2).resize((1024, 512,), Image.LANCZOS).save(os.path.join(path_2, out_fn))


# Note: this relies on fork-based multiprocessing (Linux); there is no
# __main__ guard, so spawn-based platforms would re-run the module in the workers.
chunksize = 3
with Pool(processes=cpu_count()) as pool:
    pool.starmap(build_thumbnail, dataset, chunksize)

# Register the rendered sequence(s) with the remote API (pass --folder_id 0 to skip).
if opt.folder_id > 0:
    endpoint = os.getenv('API_REMOTE') + '/api/file/'
    for label in labels:
        subprocess.call([
            "curl", "-X", "POST",
            "-d", "folder_id={}".format(opt.folder_id),
            "-d", "module=pix2pixhd",
            "-d", "name={}.mov".format(label),
            "-d", "url=https://s3.amazonaws.com/i.asdf.us/cortex/lens/data/{}/{}.mov".format(opt.folder_id, label),
            "-d", "dataset={}".format(label),
            "-d", "activity=splice",
            "-d", "generated=0",
            "-d", "processed=1",
            "-d", "datatype=video",
            endpoint,
        ])
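
# ---------------------------------------------------------------------------
# Worked example of the default crop geometry (plain arithmetic for the
# defaults above; nothing in this block is executed):
#
#   overall_aspect    = 3 crops * 3:1    = 9
#   overlapped_aspect = 9 - 3 * 0.5      = 7.5
#   crop_width        = 4096 * 3 / 7.5   = 1638.4 px  -> 1638.4 / 4096 * 360 = 144 deg of longitude
#   crop_height       = 1638.4 / 3       =  546.1 px  ->  546.1 / 2048 * 180 =  48 deg of latitude
#   overlap           = 0.5 * 4096 / 7.5 =  273.1 px  ->  273.1 / 4096 * 360 =  24 deg,
#                       i.e. 273.1 / 1638.4 = 1/6 of each 3:1 crop, matching the header comment.
#
# Example invocation (a sketch: the script filename, label, and folder_id are
# placeholders; API_REMOTE must be set in the environment or a .env file only
# when folder_id > 0, since folder_id 0 skips the upload step):
#
#   python crop_360.py --folder ./sequences/venice_360/equi \
#       --label venice_center --folder_id 0 --overlap 0.5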