1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
|
import os
import glob
import argparse
from shutil import rmtree
from PIL import Image
from multiprocessing import Pool
from dotenv import load_dotenv, find_dotenv
import subprocess
load_dotenv(find_dotenv())
# This script generates crops with a specific aspect ratio from a 360 video.
# It creates three sequences (identified by "--label")
# The default is a 24 degree overlap (equivalent to 1/6 of the 3:1 output image)
# Setting a higher overlap means you can have taller vertical FOV.
parser = argparse.ArgumentParser()
parser.add_argument('--folder', default="./pix2pixhd/sequences/venice_360/equi",
                    help="Directory containing the source equirectangular .png frames.")
parser.add_argument('--label', required=True,
                    help="Base name for the three output sequences (suffixed _a/_b/_c).")
parser.add_argument('--vertical_offset', type=int, default=983,
                    help="Vertical center (in source pixels) of the crop window.")
parser.add_argument('--folder_id', type=int, required=True,
                    help="Remote folder id to register results under; values <= 0 skip the API call.")
parser.add_argument('--overlap', type=float, default=0.5,
                    help="Horizontal overlap between adjacent crops, in aspect-ratio units.")
# NOTE(review): store_false means PASSING --clobber disables clobbering
# (default True wipes existing output; the flag switches to resume mode).
# Confusingly inverted, but kept as-is for command-line compatibility.
parser.add_argument('--clobber', action='store_false',
                    help="Pass to keep existing output and resume instead of wiping it.")
opt = parser.parse_args()
# --- Crop geometry -------------------------------------------------------
# Source equirectangular frame dimensions.
src_width = 4096
src_height = 2048
count = 3            # number of output sequences produced per frame
output_aspect = 3    # each output tile is 3:1 (width:height)
# Combined aspect of all tiles, reduced by the per-tile overlap, gives the
# effective horizontal span that one crop width is scaled against.
overall_aspect = count * output_aspect
overlapped_aspect = overall_aspect - count * opt.overlap
crop_width = src_width * output_aspect / overlapped_aspect
crop_height = crop_width / output_aspect
# Horizontal crop centers. c0 sits past src_width and relies on the frame
# being pasted twice side by side (see build_thumbnail) for wrap-around.
c0 = 7/6 * src_width
c1 = 3/6 * src_width
c2 = 5/6 * src_width
# Left edges, shared top edge, and PIL-style (left, top, right, bottom) boxes.
x0, x1, x2 = (center - crop_width / 2 for center in (c0, c1, c2))
y0 = opt.vertical_offset - crop_height / 2
p0, p1, p2 = ((left, y0, left + crop_width, y0 + crop_height,)
              for left in (x0, x1, x2))
# One sequence per crop: <label>_a, <label>_b, <label>_c.
label_0, label_1, label_2 = (opt.label + suffix for suffix in ('_a', '_b', '_c'))
labels = [label_0, label_1, label_2]
path_0, path_1, path_2 = (os.path.join("sequences", lbl) for lbl in labels)
# Prepare the three output directories. With clobbering enabled (the
# default), any previous output is wiped first; otherwise existing
# directories are kept so the run can resume.
for _path in (path_0, path_1, path_2):
    if opt.clobber and os.path.exists(_path):
        rmtree(_path)
    # exist_ok so a resume run (--clobber passed -> opt.clobber False) does
    # not crash with FileExistsError on directories left by a previous run,
    # which the skip logic in the frame loop below clearly intends to allow.
    os.makedirs(_path, exist_ok=True)
# Collect (index, filename) work items for the pool. When resuming
# (clobber disabled), frames whose first-sequence output already exists
# are skipped.
dataset = []
frames = sorted(glob.glob(os.path.join(opt.folder, '*.png')))
for idx, src_fn in enumerate(frames):
    done_fn = os.path.join(path_0, "frame_{:05d}.png".format(idx + 1))
    if opt.clobber or not os.path.exists(done_fn):
        dataset.append((idx, src_fn,))
def build_thumbnail(i, fn):
    """Render frame *i* (source file *fn*) into the three crop sequences.

    Pastes the equirectangular frame twice side by side so the crop whose
    center lies past the right edge (p0) stays continuous across the wrap,
    then saves each of the three crops resized to 1024x512 into its
    sequence directory. Relies on module-level p0/p1/p2, path_0/1/2,
    src_width and src_height. Prints a progress marker every 100 frames.
    """
    out_fn = "frame_{:05d}.png".format(i + 1)
    if (i % 100) == 0:
        print("{}...".format(i))
    canvas = Image.new('RGB', (int(src_width * 3/2), src_height,))
    image = Image.open(fn)
    canvas.paste(image, (0, 0))
    canvas.paste(image, (src_width, 0))
    # Image.LANCZOS is the long-standing alias for the ANTIALIAS filter;
    # Image.ANTIALIAS itself was removed in Pillow 10, so use LANCZOS.
    for box, out_dir in ((p0, path_0), (p1, path_1), (p2, path_2)):
        canvas.crop(box).resize((1024, 512,), Image.LANCZOS).save(
            os.path.join(out_dir, out_fn))
# Fan the frames out across all cores. The original referenced
# multiprocessing.cpu_count() without importing the module (only Pool was
# imported), which raises NameError; os.cpu_count() is already in scope.
# os.cpu_count() can return None, hence the fallback to 1.
agents = os.cpu_count() or 1
chunksize = 3
with Pool(processes=agents) as pool:
    pool.starmap(build_thumbnail, dataset, chunksize)
# Register the three rendered sequences with the remote API (skipped for
# non-positive folder ids, e.g. local-only runs). Best-effort: curl exit
# codes are not checked, matching the original behavior.
if opt.folder_id > 0:
    api_remote = os.getenv('API_REMOTE')
    if not api_remote:
        # Fail loudly with a clear message: the original crashed here with
        # an opaque TypeError ('NoneType' + str) when the .env entry was
        # missing.
        raise SystemExit("API_REMOTE is not set; cannot register sequences")
    endpoint = api_remote + '/api/folder/{}/'.format(opt.folder_id)
    for label in labels:
        subprocess.call([
            "curl",
            "-X", "POST",
            "-d", "folder_id={}".format(opt.folder_id),
            "-d", "module=pix2pixhd",
            "-d", "name={}.mov".format(label),
            "-d", "url=https://s3.amazonaws.com/i.asdf.us/cortex/lens/data/{}/{}.mov".format(opt.folder_id, label),
            "-d", "dataset={}".format(label),
            "-d", "activity=splice",
            "-d", "generated=0",
            "-d", "processed=1",
            "-d", "datatype=video",
            endpoint
        ])
|