import path from 'path'
const name = 'pix2pix'
const cwd = process.env.PIX2PIX_CWD || path.join(process.env.HOME, 'code/' + name + '/')
/*
what are all the tasks that pix2pix has to do?
- fetch url
- fetch youtube
- ffmpeg movie into frames
- unzip zip file into sequence
- list sequences
*/
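// Each activity below follows the same shape: `type` names the interpreter the
// runner should use, `script` is the file to execute (relative to `cwd`),
// `params(task)` builds the CLI arguments, and an optional `listen(task, line, i)`
// turns stdout lines into progress events. (The runner itself lives outside this
// module; this description is inferred from how the fields are used here.)
//
// `fetch` downloads source material (e.g. via youtube-dl) with get.pl and picks up
// the resulting dataset name from the script's "created dataset:" output. Note that
// it is defined here but not currently registered in the exported `activities` below.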
const fetch = {
  type: 'perl',
  script: 'get.pl',
  params: (task) => {
    console.log(task)
    return [ task.module, task.opt.url ]
  },
  listen: (task, line, i) => {
    // relay the new dataset name from youtube-dl or w/e
    if ( line.match(/^created dataset: /) ) {
      let filename = line.split(': ')[1].trim()
      task.dataset = filename.split('.')[0]
      task.opt.filename = filename
      return { type: 'progress', action: 'resolve_dataset', task, }
    }
    return null
  }
}
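// `combine_folds` pairs the A and B image folders for a dataset into the single
// side-by-side "aligned" images that pix2pix trains on, using the repo's
// datasets/combine_A_and_B.py script.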
const combine_folds = {
  type: 'pytorch',
  script: 'datasets/combine_A_and_B.py',
  params: (task) => {
    return [
      '--fold_A', task.module + '/a_b/' + task.dataset + '/A',
      '--fold_B', task.module + '/a_b/' + task.dataset + '/B',
      '--fold_AB', task.module + '/datasets/' + task.dataset,
    ]
  }
}
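// `train` runs pix2pix training on the combined dataset. Images are scaled to
// loadSize and cropped to fineSize (256); `--continue_train` with
// `--which_epoch latest` resumes from the most recent checkpoint, and
// `--epoch_count` sets where the epoch numbering picks up. `--cortex_module`
// looks like a flag added in this fork rather than a stock pix2pix option.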
const train = {
  type: 'pytorch',
  script: 'train.py',
  params: (task) => {
    return [
      '--dataroot', './datasets/' + task.dataset,
      '--name', task.dataset,
      '--model', 'pix2pix',
      '--loadSize', (task.opt && task.opt.load_size) || 264,
      '--fineSize', 256,
      '--which_model_netG', 'unet_256',
      '--which_direction', 'AtoB',
      '--lambda_B', 100,
      '--dataset_mode', 'aligned',
      '--epoch_count', task.epochs,
      '--which_epoch', 'latest',
      '--continue_train',
      '--no_lsgan',
      '--norm', 'batch',
      '--pool_size', '0',
      '--cortex_module', task.module,
    ]
  },
}
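// `generate` runs inference with test.py over a frame sequence, writing up to
// `--how_many` outputs from the checkpoint saved under the dataset's `--name`.
// `--start_img` appears to be a fork-specific addition rather than a stock flag.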
const generate = {
  type: 'pytorch',
  script: 'test.py',
  params: (task) => {
    return [
      '--dataroot', '/sequences/' + task.module + '/' + task.dataset,
      '--name', task.dataset,
      '--start_img', '/sequences/' + task.module + '/' + task.dataset + '/frame_00001.png',
      '--how_many', 1000,
      '--model', 'test',
      '--aspect_ratio', 1.777777,
      '--which_model_netG', 'unet_256',
      '--which_direction', 'AtoB',
      '--dataset_mode', 'test',
      '--loadSize', 256,
      '--fineSize', 256,
      '--norm', 'batch'
    ]
  },
}
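// `live` drives live-mogrify.py, which appears to be a custom script in this fork:
// it polls a frame sequence, feeds a fraction of generated frames recursively back
// through the model, and blends between sequence and recursive sources using the
// transition flags. The flag meanings are inferred from their names.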
const live = {
  type: 'pytorch',
  script: 'live-mogrify.py',
  params: (task) => {
    return [
      '--dataroot', '/sequences/' + task.module + '/' + task.dataset,
      '--start_img', '/sequences/' + task.module + '/' + task.dataset + '/frame_00001.png',
      '--experiment', task.checkpoint,
      '--name', task.checkpoint,
      '--module-name', task.module,
      '--recursive', '--recursive-frac', 0.1,
      '--sequence', '--sequence-frac', 0.3,
      '--process-frac', 0.5,
      '--transition',
      '--transition-min', 0.05,
      '--how_many', 1000000, '--transition-period', 1000,
      '--loadSize', 256, '--fineSize', 256,
      '--just-copy', '--poll_delay', 0.09,
      '--model', 'test',
      '--which_model_netG', 'unet_256',
      '--which_direction', 'AtoB',
      '--dataset_mode', 'recursive',
      '--which_epoch', 'latest',
      '--norm', 'batch',
    ]
  },
}
export default {
  name, cwd,
  activities: {
    combine_folds, train, generate, live,
  }
}
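// A minimal consumption sketch (hypothetical; the actual runner is not part of this
// module). It assumes `type: 'pytorch'` maps to a python interpreter and that params
// are stringified before spawning:
//
//   import { spawn } from 'child_process'
//   import pix2pix from './pix2pix'
//
//   const task = { module: 'demo', dataset: 'demo', epochs: 1, opt: {} }
//   const activity = pix2pix.activities.train
//   const args = [activity.script, ...activity.params(task).map(String)]
//   const proc = spawn('python', args, { cwd: pix2pix.cwd })
//   proc.stdout.on('data', (buf) => console.log(buf.toString()))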