import path from 'path'
import fs from 'fs'
import os from 'os'

// Activity definitions for the pix2pixHD pipeline:
//   fetch -> build -> train, plus generate / (live -> render).
// Each activity names an external script (perl or pytorch), builds its CLI
// params from a task object, and may attach a stdout `listen` hook that
// relays progress back to the task runner.

const name = 'pix2pixhd'

// $HOME can be unset in service environments; os.homedir() is the safe fallback.
const homeDir = process.env.HOME || os.homedir()

// Working directory of the pix2pixHD checkout; overridable via PIX2PIXHD_CWD.
const cwd = process.env.PIX2PIXHD_CWD || path.join(homeDir, 'code', `${name}/`)

const env = {
  LD_LIBRARY_PATH: `/usr/local/cuda/lib64:${homeDir}/Downloads/TensorRT-4.0.0.3/lib`,
}

// Both fetch and live scan script stdout for a "<prefix>: <tag>" line and
// resolve the task's dataset from it. This factory builds that shared listener.
// Returns a progress event when the tag line is seen, otherwise null.
const makeDatasetListener = (pattern, logLabel) => (task, res) => {
  const lines = res.split('\n')
  for (const line of lines) {
    console.log(line)
    if (pattern.test(line)) {
      const tag = line.split(': ')[1].trim()
      task.dataset = tag
      console.log(logLabel, tag)
      return { type: 'progress', action: 'resolve_dataset', task }
    }
  }
  return null
}

// Download source material (youtube-dl or similar) via get.pl.
// Named fetchActivity locally to avoid shadowing the global fetch();
// it is still exported under the `fetch` activity key below.
const fetchActivity = {
  type: 'perl',
  script: 'get.pl',
  params: (task) => {
    console.log(task)
    return [task.opt.url]
  },
  // relay the new dataset name from youtube-dl or w/e
  listen: makeDatasetListener(/^created dataset: /, '>>>>>> created dataset'),
  after: 'build',
}

// Turn the fetched material into a training dataset.
const build = {
  type: 'perl',
  script: 'build_dataset.pl',
  params: (task) => [task.dataset],
}

// Train (or resume training) a pix2pixHD model on the dataset.
const train = {
  type: 'pytorch',
  script: 'train.py',
  params: (task) => {
    const dataset = task.dataset.toLowerCase()
    const datasetsPath = path.join(cwd, 'datasets', dataset)
    const checkpointsPath = path.join(cwd, 'checkpoints', dataset)
    const iterTxt = path.join(checkpointsPath, 'iter.txt')
    console.log(dataset, iterTxt)

    // Resume epoch recorded by a previous run; 0 (or a missing/garbled file)
    // means start fresh. Parsing to a number fixes the old bug where the
    // string "0" was truthy and wrongly triggered --continue_train.
    let epoch = 0
    if (fs.existsSync(iterTxt)) {
      const iter = fs.readFileSync(iterTxt).toString().split('\n')
      console.log(iter)
      epoch = Number.parseInt(iter[0], 10) || 0
      console.log(task.module, dataset, '=>', epoch, task.epochs)
    } else {
      console.log(task.module, dataset, '=>', 'starting new training')
    }

    let args = [
      '--dataroot', datasetsPath,
      '--module_name', task.module,
      '--name', dataset,
      '--model', 'pix2pixHD',
      '--label_nc', 0,
      '--no_instance',
      '--niter', task.epochs,
      '--niter_decay', 0,
    ]
    if (epoch) {
      args = args.concat(['--which_epoch', 'latest', '--continue_train'])
    }
    return args
  },
}

// One-shot inference over a prepared sequence.
const generate = {
  type: 'pytorch',
  script: 'test.py',
  params: (task) => [
    '--dataroot', `/sequences/${task.dataset}`,
    '--module_name', task.module,
    '--name', task.dataset,
    '--start_img', `/sequences/${task.dataset}/frame_00001.png`,
    '--how_many', 1000,
    '--model', 'test',
    '--aspect_ratio', 1.777777,
    '--which_model_netG', 'unet_256',
    '--which_direction', 'AtoB',
    '--dataset_mode', 'test',
    '--loadSize', 256,
    '--fineSize', 256,
    '--norm', 'batch',
  ],
}

// Long-running recursive/live generation; writes frames that render picks up.
const live = {
  type: 'pytorch',
  script: 'live.py',
  params: (task) => {
    console.log(task)
    const opt = task.opt || {}
    return [
      '--phase', 'recursive',
      '--dataroot', path.join(cwd, 'sequences', task.dataset),
      '--start_img', path.join(cwd, 'sequences', task.dataset, 'frame_00001.png'),
      '--checkpoint-name', task.checkpoint,
      '--experiment', task.checkpoint,
      '--name', task.checkpoint,
      '--module_name', 'pix2pixHD',
      '--sequence-name', task.dataset,
      '--recursive',
      '--recursive-frac', 0.1,
      '--sequence',
      '--sequence-frac', 0.3,
      '--process-frac', 0.5,
      '--label_nc', '0',
      '--no_instance',
      '--how_many', 10000,
      '--transition-period', 1000,
      '--just-copy',
      '--poll_delay', opt.poll_delay || 0.09,
      '--which_epoch', 'latest',
      '--norm', 'batch',
      '--store_b', // comment this line to store all live output
    ]
  },
  // relay the new dataset name from youtube-dl or w/e
  listen: makeDatasetListener(/^final result: /, '>>>>>> recording live to'),
  after: 'render',
}

// Assemble generated frames into a movie and upload to the API folder.
const render = {
  type: 'perl',
  script: 'dir-to-movie.pl',
  params: (task) => [
    '--tag', task.dataset,
    '--module', task.module,
    // NOTE(review): assumes API_REMOTE is set at call time — verify against caller.
    '--endpoint', `${process.env.API_REMOTE}/api/folder/${task.opt.folder_id}/upload/`,
  ],
}

export default {
  name,
  cwd,
  env,
  activities: {
    fetch: fetchActivity,
    build,
    train,
    generate,
    live,
    render,
  },
}