| -rw-r--r-- | model.py | 15 |
1 file changed, 7 insertions, 8 deletions
```diff
@@ -321,9 +321,6 @@ class PrimedGenerator(Runner):
         sequences = torch.LongTensor(n_seqs, n_samples) # 64-bit int
         frame_level_outputs = [None for _ in self.model.frame_level_rnns]

-        out_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
-        tmp_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
-
         q_levels = self.model.q_levels
         q_width = 64
         q_min = int(q_levels / 2 - q_width / 2)
@@ -369,6 +366,8 @@ class PrimedGenerator(Runner):
         }

         sequences = sequence_lookup.get(primer, 'zero')(sequences)
+        out_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
+        tmp_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
         # out_sequences = sequence_lookup.get(primer, 'zero')(out_sequences)
         out_sequences[:, :self.model.lookback] = sequences[:, :self.model.lookback]

@@ -383,7 +382,7 @@ class PrimedGenerator(Runner):
             pj = bottom_frame_size - n
             for j in range(n):
                 ratio = (pj + j) / (bottom_frame_size-1)
-                ratio *= ratio
+                # ratio *= ratio
                 a = sub_sequence_a[:, j].float() * (1-ratio)
                 b = sub_sequence_b[:, j].float() * ratio
                 tmp_sub_sequence[:, j] = torch.clamp(a + b, 0, q_levels).long()
@@ -399,8 +398,8 @@ class PrimedGenerator(Runner):
                 if i % rnn.n_frame_samples != 0:
                     continue

-                sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
-                # sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],
+                # sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
+                sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],

                 prev_samples = torch.autograd.Variable(
                     2 * utils.linear_dequantize(
@@ -425,8 +424,8 @@ class PrimedGenerator(Runner):
                     rnn, prev_samples, upper_tier_conditioning
                 )

-            # sub_sequence = get_sub_sequence(i, bottom_frame_size)
-            sub_sequence = out_sequences[:, i-bottom_frame_size : i]
+            sub_sequence = get_sub_sequence(i, bottom_frame_size)
+            # sub_sequence = out_sequences[:, i-bottom_frame_size : i]

             prev_samples = torch.autograd.Variable(
                 sub_sequence,
```
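
The hunk at `-383,7` changes the crossfade between the two sub-sequences from a quadratic ramp to a linear one by commenting out `ratio *= ratio`. As a rough sketch of that blend only (the `crossfade` helper, the constant-sequence example, and the dropped `pj` offset below are illustrative assumptions, not the repository's code):

```python
import torch

def crossfade(sub_sequence_a, sub_sequence_b, q_levels):
    """Blend two quantized sub-sequences across a frame (illustrative sketch)."""
    n = sub_sequence_a.size(1)
    out = sub_sequence_a.clone()
    for j in range(n):
        # Linear ramp from 0 to 1 across the frame; re-enabling
        # `ratio *= ratio` here would make the ramp quadratic instead.
        ratio = j / (n - 1)
        a = sub_sequence_a[:, j].float() * (1 - ratio)
        b = sub_sequence_b[:, j].float() * ratio
        out[:, j] = torch.clamp(a + b, 0, q_levels).long()
    return out

# Hypothetical usage: blend two constant quantized sequences.
a = torch.full((1, 8), 10, dtype=torch.long)
b = torch.full((1, 8), 200, dtype=torch.long)
print(crossfade(a, b, q_levels=256))
```

With the squaring in place, the weight on `sub_sequence_b` stays small for most of the frame and only catches up near the end; the linear ramp spreads the transition evenly across the frame.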
