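// pix2pix activity config: defines how the dataset-preparation, training,
// generation, and live scripts are run from the project checkout
// (PIX2PIX_CWD, defaulting to ~/code/pix2pix/).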
import path from 'path'
const name = 'pix2pix'
const cwd = process.env.PIX2PIX_CWD || path.join(process.env.HOME, 'code/' + name + '/')
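// Dataset prep: pair the A and B image folders into aligned AB images
// (combine_A_and_B.py from the pix2pix repo).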
const dataset = {
  type: 'pytorch',
  script: 'datasets/combine_A_and_B.py',
  params: (task) => {
  },
  // python datasets/combine_A_and_B.py \
  //   --fold_A /home/lens/Desktop/thumbs/woodscaled_4/A \
  //   --fold_B /home/lens/Desktop/thumbs/woodscaled_4/B \
  //   --fold_AB datasets/woodscaled_4/
}
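// Training: fit the pix2pix model (U-Net generator, aligned dataset mode)
// using the command documented below.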
const train = {
  type: 'pytorch',
  script: 'train.py',
  params: (task) => {
  },
  // python train.py \
  //   --dataroot "./datasets/$dataset" \
  //   --name "$dataset" \
  //   --model pix2pix \
  //   --loadSize 264 \
  //   --fineSize 256 \
  //   --which_model_netG unet_256 \
  //   --which_direction AtoB \
  //   --lambda_B 100 \
  //   --dataset_mode aligned \
  //   --epoch_count $epochs \
  //   --which_epoch latest \
  //   --continue_train \
  //   --no_lsgan --norm batch --pool_size 0
}
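// Generation: run a trained model to produce outputs (custom generate.py;
// no invocation documented here yet).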
const generate = {
  type: 'pytorch',
  script: 'generate.py',
  params: (task) => {
  },
}
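// Live mode: poll a frame sequence and run the model on incoming frames,
// optionally feeding results back recursively with transitions
// (custom live-mogrify.py; see the flags below).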
const live = {
  type: 'pytorch',
  script: 'live-mogrify.py',
  params: (task) => {
  },
  // python live-mogrify.py \
  //   --dataroot "./sequences/$sequence" \
  //   --start_img "./sequences/$sequence/frame_00001.png" \
  //   --experiment "$checkpoint" \
  //   --name "$checkpoint" \
  //   --recursive --recursive-frac 0.1 \
  //   --sequence --sequence-frac 0.3 \
  //   --process-frac 0.5 \
  //   --transition \
  //   --transition-min 0.05 \
  //   --how_many 100000 --transition-period 1000 \
  //   --loadSize 256 --fineSize 256 \
  //   --just-copy --poll_delay 0.09 \
  //   --model test --which_model_netG unet_256 \
  //   --which_direction AtoB --dataset_mode recursive \
  //   --which_epoch latest \
  //   --norm batch
}
export default {
  name, cwd,
  activities: {
    dataset, train, generate, live,
  },
}
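// Note: the `params` functions above are still empty stubs; each activity's
// intended CLI invocation is only documented in the comments. As a sketch
// (assuming the runner expects `params` to return an array of CLI arguments,
// which this file does not specify), the dataset activity could mirror its
// documented command like so:
//
//   params: (task) => ([
//     '--fold_A', '/home/lens/Desktop/thumbs/woodscaled_4/A',
//     '--fold_B', '/home/lens/Desktop/thumbs/woodscaled_4/B',
//     '--fold_AB', 'datasets/woodscaled_4/',
//   ]),
//
// The real contract of `params(task)` depends on the runner that consumes
// this config, so treat the shape above as an assumption, not a requirement.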