author     Jules Laplace <julescarbon@gmail.com>   2018-06-06 23:26:05 +0200
committer  Jules Laplace <julescarbon@gmail.com>   2018-06-06 23:26:05 +0200
commit     0852209a85f5c5d4e9885dcce72d1262d40dc0f9 (patch)
tree       dfc951c50d81470f81f428bbb6b3b88032c17425 /app/relay/modules/pix2pix.js
parent     26b80e09cd64d5bb5b40bc7872aff397d9cc80ea (diff)
p2p tasks
Diffstat (limited to 'app/relay/modules/pix2pix.js')
-rw-r--r--  app/relay/modules/pix2pix.js  127
1 file changed, 89 insertions, 38 deletions
diff --git a/app/relay/modules/pix2pix.js b/app/relay/modules/pix2pix.js
index 3727964..d7b9478 100644
--- a/app/relay/modules/pix2pix.js
+++ b/app/relay/modules/pix2pix.js
@@ -3,69 +3,120 @@ import path from 'path'
const name = 'pix2pix'
const cwd = process.env.PIX2PIX_CWD || path.join(process.env.HOME, 'code/' + name + '/')
-const dataset = {
+/*
+ what are all the tasks that pix2pix has to do?
+ - fetch url
+ - fetch youtube
+ - ffmpeg movie into frames
+ - unzip zip file into sequence
+ - list sequences
+ -
+*/
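+// Of the tasks listed above only the url/youtube fetch is defined in this file;
+// a hypothetical sketch of the "ffmpeg movie into frames" task (the type, flags,
+// and output pattern below are assumptions, not part of this commit) could reuse
+// the same shape:
+//
+// const frames = {
+//   type: 'ffmpeg',                 // hypothetical type; only 'perl' and 'pytorch' exist here
+//   params: (task) => {
+//     return [
+//       '-i', task.opt.filename,    // movie fetched by the task above
+//       task.module + '/sequences/' + task.dataset + '/frame_%05d.png',
+//     ]
+//   }
+// }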
+
+const fetch = {
+ type: 'perl',
+ script: 'get.pl',
+ params: (task) => {
+ console.log(task)
+ return [ task.opt.url ]
+ },
+ listen: (task, line, i) => {
+ // bridge again here: take the filename that youtube-dl reports back,
+ // tell the cortex the URL -> filename mapping, and record the filename on the task
+ if ( line.match(/^youtube-dl got fn, /) ) {
+ let filename = line.split(' => ')[1].trim()
+ task.dataset = filename.split('.')[0]
+ task.opt.filename = filename
+ return { type: 'progress', action: 'resolve_dataset', task, }
+ }
+ return null
+ }
+}
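+// e.g. a line such as "youtube-dl got fn, <url> => clip.mp4" (format implied by
+// the match/split above) sets task.dataset = 'clip' and task.opt.filename = 'clip.mp4'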
+
+const combine_folds = {
type: 'pytorch',
script: 'datasets/combine_A_and_B.py',
params: (task) => {
+ return [
+ '--fold_A', task.module + '/a_b/' + task.dataset + '/A',
+ '--fold_B', task.module + '/a_b/' + task.dataset + '/B',
+ '--fold_AB', task.module + '/datasets/' + task.dataset,
+ ]
}
-// python datasets/combine_A_and_B.py \
-// --fold_A /home/lens/Desktop/thumbs/woodscaled_4/A \
-// --fold_B /home/lens/Desktop/thumbs/woodscaled_4/B \
-// --fold_AB datasets/woodscaled_4/
}
const train = {
type: 'pytorch',
script: 'train.py',
params: (task) => {
+ return [
+ '--dataroot', './datasets/' + task.dataset,
+ '--name', task.dataset,
+ '--model', 'pix2pix',
+ '--loadSize', task.opt.load_size || 264,
+ '--fineSize', 256,
+ '--which_model_netG', 'unet_256',
+ '--which_direction', 'AtoB',
+ '--lambda_B', 100,
+ '--dataset_mode', 'aligned',
+ '--epoch_count', task.epochs,
+ '--which_epoch', 'latest',
+ '--continue_train',
+ '--no_lsgan',
+ '--norm', 'batch',
+ '--pool_size', '0',
+ ]
},
-// python train.py \
-// --dataroot "./datasets/$dataset" \
-// --name "$dataset" \
-// --model pix2pix \
-// --loadSize 264 \
-// --fineSize 256 \
-// --which_model_netG unet_256 \
-// --which_direction AtoB \
-// --lambda_B 100 \
-// --dataset_mode aligned \
-// --epoch_count $epochs \
-// --which_epoch latest \
-// --continue_train \
-// --no_lsgan --norm batch --pool_size 0
}
const generate = {
type: 'pytorch',
- script: 'generate.py',
+ script: 'test.py',
params: (task) => {
+ return [
+ '--dataroot', task.module + '/sequences/' + task.dataset, // value assumed (missing in the original); matches the sequences dir used for --start_img below
+ '--name', task.dataset,
+ '--start_img', task.module + '/sequences/' + task.dataset,
+ '--how_many', 1000,
+ '--model', 'test',
+ '--aspect_ratio', 1.777777,
+ '--which_model_netG', 'unet_256',
+ '--which_direction', 'AtoB',
+ '--dataset_mode', 'test',
+ '--loadSize', 256,
+ '--fineSize', 256,
+ '--norm', 'batch'
+ ]
},
}
const live = {
type: 'pytorch',
script: 'live-mogrify.py',
params: (task) => {
+ return [
+ '--dataroot', task.module + '/sequences/' + task.dataset,
+ '--start_img', task.module + '/sequences/' + task.dataset + '/frame_00001.png',
+ '--experiment', task.checkpoint,
+ '--name', task.checkpoint,
+ '--recursive', '--recursive-frac', 0.1,
+ '--sequence', '--sequence-frac', 0.3,
+ '--process-frac', 0.5,
+ '--transition',
+ '--transition-min', 0.05,
+ '--how_many', 1000000, '--transition-period', 1000,
+ '--loadSize', 256, '--fineSize', 256,
+ '--just-copy', '--poll_delay', 0.09,
+ '--model', 'test',
+ '--which_model_netG', 'unet_256',
+ '--which_direction', 'AtoB',
+ '--dataset_mode', 'recursive',
+ '--which_epoch', 'latest',
+ '--norm', 'batch',
+ ]
},
- // python live-mogrify.py \
- // --dataroot "./sequences/$sequence" \
- // --start_img "./sequences/$sequence/frame_00001.png" \
- // --experiment "$checkpoint" \
- // --name "$checkpoint" \
- // --recursive --recursive-frac 0.1 \
- // --sequence --sequence-frac 0.3 \
- // --process-frac 0.5 \
- // --transition \
- // --transition-min 0.05 \
- // --how_many 100000 --transition-period 1000 \
- // --loadSize 256 --fineSize 256 \
- // --just-copy --poll_delay 0.09 \
- // --model test --which_model_netG unet_256 \
- // --which_direction AtoB --dataset_mode recursive \
- // --which_epoch latest \
- // --norm batch
}
export default {
name, cwd,
activities: {
- dataset, train, generate, live,
+ combine_folds, train, generate, live,
}
}
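
For context, a minimal sketch (hypothetical, not part of this diff) of how a relay runner might consume these activity definitions; the mapping of each type to a binary and the forwarding of listen() events are assumptions:

// hypothetical runner sketch -- not part of this commit
import { spawn } from 'child_process'
import pix2pix from './modules/pix2pix'

function runActivity (activityName, task) {
  const activity = pix2pix.activities[activityName]
  const bin = activity.type === 'perl' ? 'perl' : 'python'          // assumed type -> binary mapping
  const args = [ activity.script, ...activity.params(task).map(String) ]
  const child = spawn(bin, args, { cwd: pix2pix.cwd })

  // hand each stdout line to the activity's listen() hook, if it defines one
  child.stdout.on('data', (buf) => {
    buf.toString().split('\n').forEach((line, i) => {
      const event = activity.listen && activity.listen(task, line, i)
      if (event) console.log(event)   // e.g. { type: 'progress', action: 'resolve_dataset', task }
    })
  })
  return child
}

// e.g. runActivity('train', { dataset: 'woodscaled_4', epochs: 25, opt: {} })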