 generate.py        |  2 +-
 model.py           | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 train.py           |  2 +-
 trainer/plugins.py |  7 +++++--
 utils.py           |  2 +-
 5 files changed, 101 insertions(+), 7 deletions(-)
diff --git a/generate.py b/generate.py
index fb63b09..c4059ec 100644
--- a/generate.py
+++ b/generate.py
@@ -52,7 +52,7 @@ default_params = {
'loss_smoothing': 0.99,
'cuda': True,
'comet_key': None,
- 'primer': 'zero'
+ 'primer': ''
}
tag_params = [
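
The default primer is now the empty string rather than 'zero': an empty value means unprimed generation, while a non-empty name selects a primer. A sketch of the convention, with the value list read off the PrimedGenerator dispatch added below (an assumption, not part of this hunk):

    # '' selects the plain Generator; any other value selects PrimedGenerator
    # names understood by PrimedGenerator, per the dispatch table in model.py:
    #   'zero'  - constant q_zero (silence) primer
    #   'noise' - triangular-noise primer
    #   'sin'   - 440 Hz sine primer
    primer = default_params['primer']  # '' by default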
diff --git a/model.py b/model.py
index 567e533..a6a33d5 100644
--- a/model.py
+++ b/model.py
@@ -242,14 +242,14 @@ class Generator(Runner):
super().__init__(model)
self.cuda = cuda
- def __call__(self, n_seqs, seq_len, primer):
+ def __call__(self, n_seqs, seq_len, primer=None):  # primer unused here; kept so GeneratorPlugin.epoch() can call either generator
# generation doesn't work with CUDNN for some reason
torch.backends.cudnn.enabled = False
self.reset_hidden_states()
bottom_frame_size = self.model.frame_level_rnns[0].n_frame_samples
sequences = torch.LongTensor(n_seqs, self.model.lookback + seq_len) \
.fill_(utils.q_zero(self.model.q_levels))
frame_level_outputs = [None for _ in self.model.frame_level_rnns]
@@ -299,3 +299,94 @@ class Generator(Runner):
torch.backends.cudnn.enabled = True
return sequences[:, self.model.lookback :]
+
+
+# math/random back the primer functions in PrimedGenerator below
+import math
+import random
+
+class PrimedGenerator(Runner):
+
+ def __init__(self, model, cuda=False):
+ super().__init__(model)
+ self.cuda = cuda
+
+ def __call__(self, n_seqs, seq_len, primer):
+ # generation doesn't work with CUDNN for some reason
+ torch.backends.cudnn.enabled = False
+
+ self.reset_hidden_states()
+
+ n_samples = self.model.lookback + seq_len
+ bottom_frame_size = self.model.frame_level_rnns[0].n_frame_samples
+ sequences = torch.LongTensor(n_seqs, n_samples)  # uninitialized; filled by the primer selected below
+ frame_level_outputs = [None for _ in self.model.frame_level_rnns]
+
+ q_levels = self.model.q_levels
+ primer_freq = 440
+
+ print("_______-___-_---_-____")
+ print("_____________--_-_-_______")
+ print("INITTTTTTTT")
+ print(sequences.shape)
+ print("__________________--_-__--_________________")
+ print("__-__________-_______________")
+
+ def zero(x):
+     # constant primer at the zero level (decodes to silence)
+     return x.fill_(utils.q_zero(q_levels))
+
+ def noise(x):
+     # triangular noise; each time step gets one value shared across the batch
+     for i in range(n_samples):
+         x[:, i] = int(random.triangular(0, q_levels - 1))
+     return x
+
+ def sin(x):
+     # 440 Hz sine at a 44.1 kHz sample rate, scaled into [0, q_levels)
+     for i in range(n_samples):
+         x[:, i] = int((math.sin(2 * math.pi * primer_freq * i / 44100) + 1) / 2 * (q_levels - 1))
+     return x
+
+ sequences = {
+     'zero': zero,
+     'noise': noise,
+     'sin': sin,
+ }.get(primer, zero)(sequences)
+
+ for i in range(self.model.lookback, self.model.lookback + seq_len):
+ for (tier_index, rnn) in \
+ reversed(list(enumerate(self.model.frame_level_rnns))):
+ if i % rnn.n_frame_samples != 0:
+ continue
+
+ prev_samples = torch.autograd.Variable(
+ 2 * utils.linear_dequantize(
+ sequences[:, i - rnn.n_frame_samples : i],
+ self.model.q_levels
+ ).unsqueeze(1),
+ volatile=True
+ )
+ if self.cuda:
+ prev_samples = prev_samples.cuda()
+
+ if tier_index == len(self.model.frame_level_rnns) - 1:
+ upper_tier_conditioning = None
+ else:
+ frame_index = (i // rnn.n_frame_samples) % \
+ self.model.frame_level_rnns[tier_index + 1].frame_size
+ upper_tier_conditioning = \
+ frame_level_outputs[tier_index + 1][:, frame_index, :] \
+ .unsqueeze(1)
+
+ frame_level_outputs[tier_index] = self.run_rnn(
+ rnn, prev_samples, upper_tier_conditioning
+ )
+
+ prev_samples = torch.autograd.Variable(
+ sequences[:, i - bottom_frame_size : i],
+ volatile=True
+ )
+ if self.cuda:
+ prev_samples = prev_samples.cuda()
+ upper_tier_conditioning = \
+ frame_level_outputs[0][:, i % bottom_frame_size, :] \
+ .unsqueeze(1)
+ sample_dist = self.model.sample_level_mlp(
+ prev_samples, upper_tier_conditioning
+ ).squeeze(1).exp_().data
+ sequences[:, i] = sample_dist.multinomial(1).squeeze(1)
+
+ torch.backends.cudnn.enabled = True
+
+ return sequences[:, self.model.lookback :]
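
A minimal usage sketch of the new class, assuming a SampleRNN constructed the way the rest of this repo does it (the hyperparameter values here are placeholders, not part of this commit):

    import torch
    from model import SampleRNN, PrimedGenerator

    model = SampleRNN(frame_sizes=[16, 4], n_rnn=2, dim=1024,
                      learn_h0=True, q_levels=256, weight_norm=True)
    generator = PrimedGenerator(model, cuda=False)
    samples = generator(n_seqs=2, seq_len=44100, primer='sin')  # one second at 44.1 kHz
    print(samples.shape)  # torch.Size([2, 44100]); integer levels in [0, q_levels)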
diff --git a/train.py b/train.py
index a40e5f6..974d8d8 100644
--- a/train.py
+++ b/train.py
@@ -52,7 +52,7 @@ default_params = {
'loss_smoothing': 0.99,
'cuda': True,
'comet_key': None,
- 'primer': 'zero',
+ 'primer': '',
}
tag_params = [
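
Assuming train.py keeps turning each default_params entry into a command-line flag (as the surrounding file suggests), the new value flows into GeneratorPlugin roughly like this; the constructor arguments shown are assumptions based on the plugins.py hunk below:

    # sketch: the '' default overridden, e.g. by a --primer command-line flag
    params = dict(default_params, primer='sin')
    plugin = GeneratorPlugin('generated_samples',          # output path (placeholder)
                             n_samples=2, sample_length=44100,
                             primer=params['primer'])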
diff --git a/trainer/plugins.py b/trainer/plugins.py
index dc3b24a..562355e 100644
--- a/trainer/plugins.py
+++ b/trainer/plugins.py
@@ -1,7 +1,7 @@
import matplotlib
matplotlib.use('Agg')
-from model import Generator
+from model import Generator, PrimedGenerator
import torch
from torch.autograd import Variable
@@ -152,7 +152,10 @@ class GeneratorPlugin(Plugin):
self.primer = primer
def register(self, trainer):
- self.generate = Generator(trainer.model.model, trainer.cuda)
+ if self.primer == "":
+ self.generate = Generator(trainer.model.model, trainer.cuda)
+ else:
+ self.generate = PrimedGenerator(trainer.model.model, trainer.cuda)
def epoch(self, epoch_index):
samples = self.generate(self.n_samples, self.sample_length, self.primer) \
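
Note that epoch() still passes self.primer unconditionally, which is why Generator.__call__ in model.py above keeps an ignored primer=None parameter; both branches then share one call signature. The dispatch, condensed:

    # sketch of register() after this change
    cls = Generator if self.primer == "" else PrimedGenerator
    self.generate = cls(trainer.model.model, trainer.cuda)
    # later: self.generate(n_samples, sample_length, self.primer) works for either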
diff --git a/utils.py b/utils.py
index 183ce53..b0b295e 100644
--- a/utils.py
+++ b/utils.py
@@ -1,7 +1,7 @@
import torch
from torch import nn
import numpy as np
-
+from random import triangular
EPSILON = 1e-2
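
For reference, the primer functions in model.py lean on the linear quantization helpers in utils.py; a sketch of their expected behavior (the usual SampleRNN scheme, which may differ in detail from the actual bodies here):

    def q_zero(q_levels):
        # the integer level that dequantizes to 0.0 (digital silence)
        return q_levels // 2

    def linear_dequantize(samples, q_levels):
        # map integer levels in [0, q_levels) back to floats in roughly [-1, 1)
        return samples.float() / (q_levels / 2) - 1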