Diffstat (limited to 'model.py')
-rw-r--r--  model.py | 39
1 file changed, 31 insertions, 8 deletions
@@ -321,10 +321,8 @@ class PrimedGenerator(Runner):
         sequences = torch.LongTensor(n_seqs, n_samples)  # 64-bit int
         frame_level_outputs = [None for _ in self.model.frame_level_rnns]
 
-        if recursive:
-            out_sequences = sequences
-        else:
-            out_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
+        out_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
+        tmp_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
 
         q_levels = self.model.q_levels
         q_min = 0
@@ -349,21 +347,44 @@ class PrimedGenerator(Runner):
                 x[:, i] = int((math.sin(i/44100 * primer_freq) + 1) / 2 * (q_max - q_min) + q_min)
             return x
 
-        sequences = {
+        sequence_lookup = {
             'zero': lambda x: x.fill_(utils.q_zero(self.model.q_levels)),
             'noise': _noise,
             'sin': _sin,
-        }.get(primer, 'zero')(sequences)
+        }
+
+        sequences = sequence_lookup.get(primer, sequence_lookup['zero'])(sequences)
+        # out_sequences = sequence_lookup.get(primer, sequence_lookup['zero'])(out_sequences)
+        out_sequences[:, :self.model.lookback] = sequences[:, :self.model.lookback]
+
+        # Here we regenerate the subsequence on every call; it may be better to
+        # generate a subsequence of bottom_frame_size length once and then draw
+        # from that, although that would strongly emphasize the recursion.
+        def get_sub_sequence(i, n):
+            sub_sequence_a = sequences[:, i-n : i]        # generated (primer)
+            sub_sequence_b = out_sequences[:, i-n : i]    # recursive
+            tmp_sub_sequence = tmp_sequences[:, i-n : i]  # tmp/output
+
+            for j in range(n):
+                ratio = j / (n - 1)  # crossfade weight: 0 = all primer, 1 = all recursive
+                a = sub_sequence_a[:, j].float() * (1 - ratio)
+                b = sub_sequence_b[:, j].float() * ratio
+                tmp_sub_sequence[:, j] = (a + b).long()
+
+            return tmp_sub_sequence
 
         for i in range(self.model.lookback, self.model.lookback + seq_len):
+
             for (tier_index, rnn) in \
                     reversed(list(enumerate(self.model.frame_level_rnns))):
                 if i % rnn.n_frame_samples != 0:
                     continue
 
+                sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
+
                 prev_samples = torch.autograd.Variable(
                     2 * utils.linear_dequantize(
-                        sequences[:, i - rnn.n_frame_samples : i],
+                        sub_sequence,
                         self.model.q_levels
                     ).unsqueeze(1),
                     volatile=True
@@ -384,8 +405,10 @@ class PrimedGenerator(Runner):
                 rnn, prev_samples, upper_tier_conditioning
             )
 
+            sub_sequence = get_sub_sequence(i, bottom_frame_size)
+
             prev_samples = torch.autograd.Variable(
-                sequences[:, i - bottom_frame_size : i],
+                sub_sequence,
                 volatile=True
            )
            if self.cuda:
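For context, the get_sub_sequence helper introduced above amounts to a per-sample linear crossfade from the primed window (sequences) into the recursively generated one (out_sequences). Below is a minimal standalone sketch of the same blend, vectorized over the window; the function name crossfade_window and the toy tensors are illustrative only, and it uses current PyTorch APIs (torch.linspace, torch.full) rather than the older Variable/volatile API this patch targets.

import torch

def crossfade_window(primed, recursive):
    # Blend two (n_seqs, n) LongTensors of quantized sample levels along
    # the time axis: weight 0 (all primed) at the first sample, rising
    # linearly to 1 (all recursive) at the last -- the same blend that
    # get_sub_sequence() computes sample by sample.
    n = primed.size(1)
    ramp = torch.linspace(0.0, 1.0, n).unsqueeze(0)  # shape (1, n), broadcasts over rows
    blended = primed.float() * (1.0 - ramp) + recursive.float() * ramp
    return blended.long()  # back to integer quantization levels

# Toy usage: an 8-sample window for 2 sequences, primed at level 128,
# recursive output at level 200; the blend ramps 128 -> 200 across the window.
primed = torch.full((2, 8), 128, dtype=torch.long)
recursive = torch.full((2, 8), 200, dtype=torch.long)
print(crossfade_window(primed, recursive))

Vectorizing the ramp this way avoids the per-sample Python loop in get_sub_sequence and sidesteps the integer-truncation pitfall of multiplying a LongTensor by a fractional float weight.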
