summaryrefslogtreecommitdiff
path: root/app/relay/modules/samplernn.js
blob: 1b8f550baaed45f50cc0567a3c29f532e5d2d1c6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import path from 'path'

// Module identity used for paths and registration.
const name = 'samplernn'
// Working directory for spawned scripts: SAMPLERNN_CWD overrides,
// otherwise fall back to ~/code/samplernn/.
const cwd = process.env.SAMPLERNN_CWD || path.join(process.env.HOME, `code/${name}/`)

// "fetch" activity: hand the task URL to get.pl (a youtube-dl wrapper)
// and watch its output for the resolved filename.
const fetch = {
  type: 'perl',
  script: 'get.pl',
  params: (task) => {
    console.log(task)
    return [task.opt.url]
  },
  listen: (task, line, i) => {
    // Bridge the downloader's output back into the task: once youtube-dl
    // reports the filename, record it and derive the dataset name from it.
    if (!line.match(/^youtube-dl got fn, /)) return null
    const filename = line.split(' => ')[1].trim()
    task.dataset = filename.split('.')[0]
    task.opt.filename = filename
    return { type: 'progress', action: 'resolve_dataset', task }
  },
}
// "train" activity: run train.py with hyperparameters derived from the task,
// then chain into "publish".
const train = {
  type: 'pytorch_samplernn',
  script: 'train.py',
  // Build the CLI argument list for train.py from the task description.
  params: (task) => {
    return [
      '--exp', task.dataset,
      '--dataset', task.dataset,
      '--frame_sizes', '8', '2',
      '--n_rnn', '2',
      '--epoch_limit', task.epochs || 4,
      '--sample_length', task.opt.sample_length || 44100 * 5, // 5 s @ 44.1 kHz
      '--n_samples', task.opt.n_samples || 6,
      '--keep_old_checkpoints', task.opt.keep_old_checkpoints ? 'True' : 'False',
    ]
  },
  listen: (task, line, i) => {
    // train.py prints blocks like:
    // ################################################################################
    // Epoch summary:
    // training_loss: 1.4509 validation_loss: 1.4260 test_loss: 1.4316
    // ################################################################################
    if (line.match(/Epoch summary/)) {
      // FIX: the original did `task.epoch += 1`, which turns an
      // uninitialized counter into NaN (undefined + 1). Default to 0 first.
      task.epoch = (task.epoch || 0) + 1
      return { type: 'progress', action: 'epoch', task }
    }
    return null
  },
  after: 'publish',
}
// "generate" activity: sample audio from a trained model with generate.py,
// then chain into "publish". Model hyperparameters must match training.
const generate = {
  type: 'pytorch_samplernn',
  script: 'generate.py',
  params: (task) => {
    const sampleLength = task.opt.sample_length || 44100 * 5
    const nSamples = task.opt.n_samples || 6
    return [
      '--exp', task.dataset,
      '--dataset', task.dataset,
      '--frame_sizes', '8', '2',
      '--n_rnn', '2',
      '--sample_length', sampleLength,
      '--n_samples', nSamples,
    ]
  },
  after: 'publish',
}
// "publish" activity: upload the latest artifacts via latest.pl to the
// remote API folder endpoint.
const publish = {
  type: 'perl',
  script: 'latest.pl',
  // Build the CLI argument list for latest.pl.
  // BUG FIX: the original built `params` but never returned it, so the
  // publish step always ran with undefined arguments.
  params: (task) => {
    const params = [
      '-e', process.env.API_REMOTE + '/api/folder/' + task.folder_id + '/upload/',
      '-l', task.dataset,
    ]
    if (task.activity === 'generate') {
      params.push('-n') // tag the generated ones
      // Tag format: <task id>_<rounded seconds>s_<sample count>x
      params.push([
        task.id,
        Math.round((task.opt.sample_length || 44100 * 5) / 44100) + 's',
        task.opt.n_samples + 'x',
      ].join('_'))
    }
    return params
  }
}
// "clear_cache" activity: drop cached artifacts for the task's dataset.
const clear_cache = {
  type: 'perl',
  script: 'clear_cache.pl',
  params: (task) => ['-l', task.dataset],
}
// "report" activity: verbose listing via latest.pl.
// Unlike the other activities, `params` is a fixed array (no task input);
// `isScript` flags this static form for the runner.
const report = {
  type: 'perl',
  script: 'latest.pl',
  params: ['-v'],
  isScript: true,
}

// Module descriptor consumed by the relay: identity, working directory,
// and the set of runnable activities keyed by name.
export default {
  name,
  cwd,
  activities: { fetch, train, generate, publish, clear_cache, report },
}