summaryrefslogtreecommitdiff
path: root/model.py
diff options
context:
space:
mode:
Diffstat (limited to 'model.py')
-rw-r--r--model.py95
1 file changed, 93 insertions, 2 deletions
diff --git a/model.py b/model.py
index 567e533..a6a33d5 100644
--- a/model.py
+++ b/model.py
@@ -242,14 +242,14 @@ class Generator(Runner):
super().__init__(model)
self.cuda = cuda
- def __call__(self, n_seqs, seq_len, primer):
+ def __call__(self, n_seqs, seq_len):
# generation doesn't work with CUDNN for some reason
torch.backends.cudnn.enabled = False
self.reset_hidden_states()
bottom_frame_size = self.model.frame_level_rnns[0].n_frame_samples
- sequences = torch.LongTensor(n_seqs, self.model.lookback + seq_len) \
+ sequences = torch.LongTensor(n_seqs, self.model.lookback + seq_len) \
.fill_(utils.q_zero(self.model.q_levels))
frame_level_outputs = [None for _ in self.model.frame_level_rnns]
@@ -299,3 +299,94 @@ class Generator(Runner):
torch.backends.cudnn.enabled = True
return sequences[:, self.model.lookback :]
+
+
+import math
+import random
+
class PrimedGenerator(Runner):
    """Sequence generator that supports "priming" the sample buffer.

    Behaves like Generator, except that __call__ accepts a primer name
    selecting how the sample buffer (lookback region included) is
    initialized before autoregressive generation:

    * ``'zero'``  -- quantized zero everywhere (same as Generator),
    * ``'noise'`` -- per-timestep triangular noise over the q-level range,
    * ``'sin'``   -- a 440 Hz sine wave (assumes a 44100 Hz sample rate
      -- TODO confirm against the dataset's actual rate).
    """

    def __init__(self, model, cuda=False):
        """model: SampleRNN-style model exposing frame_level_rnns,
        sample_level_mlp, lookback and q_levels; cuda: move inputs to
        the GPU when True."""
        super().__init__(model)
        self.cuda = cuda

    def __call__(self, n_seqs, seq_len, primer='zero'):
        """Generate ``n_seqs`` sequences of ``seq_len`` samples each.

        primer selects the buffer initializer ('zero', 'noise' or
        'sin'); unknown names fall back to 'zero'.  Returns a
        LongTensor of shape (n_seqs, seq_len) of quantization levels.
        """
        # generation doesn't work with CUDNN for some reason
        torch.backends.cudnn.enabled = False

        self.reset_hidden_states()

        n_samples = self.model.lookback + seq_len
        bottom_frame_size = self.model.frame_level_rnns[0].n_frame_samples
        sequences = torch.LongTensor(n_seqs, n_samples)  # 64-bit int
        frame_level_outputs = [None for _ in self.model.frame_level_rnns]

        q_levels = self.model.q_levels
        primer_freq = 440  # Hz; used only by the 'sin' initializer

        # Each initializer fills `x` in place AND returns it, so the
        # dispatch below can uniformly rebind `sequences`.
        def zero_init(x):
            # quantized-zero fill, identical to Generator's behavior
            return x.fill_(utils.q_zero(self.model.q_levels))

        def noise_init(x):
            for i in range(n_samples):
                # triangular(0, q_levels) yields a float in [0, q_levels];
                # clamp into the valid quantization range [0, q_levels - 1]
                x[:, i] = min(int(random.triangular(0, q_levels)),
                              q_levels - 1)
            return x

        def sin_init(x):
            for i in range(n_samples):
                # map sin's [-1, 1] range onto [0, q_levels)
                level = (math.sin(i / 44100 * primer_freq) + 1) / 2 * q_levels
                x[:, i] = min(int(level), q_levels - 1)
            return x

        sequences = {
            'zero': zero_init,
            'noise': noise_init,
            'sin': sin_init,
        }.get(primer, zero_init)(sequences)

        for i in range(self.model.lookback, self.model.lookback + seq_len):
            # Run the conditioning tiers top-down; a tier only fires
            # when i is aligned to its frame size.
            for (tier_index, rnn) in \
                    reversed(list(enumerate(self.model.frame_level_rnns))):
                if i % rnn.n_frame_samples != 0:
                    continue

                prev_samples = torch.autograd.Variable(
                    2 * utils.linear_dequantize(
                        sequences[:, i - rnn.n_frame_samples : i],
                        self.model.q_levels
                    ).unsqueeze(1),
                    volatile=True
                )
                if self.cuda:
                    prev_samples = prev_samples.cuda()

                if tier_index == len(self.model.frame_level_rnns) - 1:
                    # topmost tier has no tier above to condition on
                    upper_tier_conditioning = None
                else:
                    frame_index = (i // rnn.n_frame_samples) % \
                        self.model.frame_level_rnns[tier_index + 1].frame_size
                    upper_tier_conditioning = \
                        frame_level_outputs[tier_index + 1][:, frame_index, :] \
                        .unsqueeze(1)

                frame_level_outputs[tier_index] = self.run_rnn(
                    rnn, prev_samples, upper_tier_conditioning
                )

            # Sample-level MLP: draw the next sample from the predicted
            # categorical distribution (log-probs -> probs via exp_).
            prev_samples = torch.autograd.Variable(
                sequences[:, i - bottom_frame_size : i],
                volatile=True
            )
            if self.cuda:
                prev_samples = prev_samples.cuda()
            upper_tier_conditioning = \
                frame_level_outputs[0][:, i % bottom_frame_size, :] \
                .unsqueeze(1)
            sample_dist = self.model.sample_level_mlp(
                prev_samples, upper_tier_conditioning
            ).squeeze(1).exp_().data
            sequences[:, i] = sample_dist.multinomial(1).squeeze(1)

        torch.backends.cudnn.enabled = True

        # drop the lookback/primer region; return only generated samples
        return sequences[:, self.model.lookback :]