author     Jules Laplace <julescarbon@gmail.com>  2018-05-15 00:57:36 +0200
committer  Jules Laplace <julescarbon@gmail.com>  2018-05-15 00:57:36 +0200
commit     f6f3b68b2f84568e9c34a7e73b00df3f1f1b9268 (patch)
tree       944fbc0782fa2f9477e9bb1de6e9169a5b9bc013
parent     29c8a5fc29b4618ac1e6ef6bbc21962fbc2b6859 (diff)
lol typo
 model.py | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/model.py b/model.py
index c84a3e5..1a6c14c 100644
--- a/model.py
+++ b/model.py
@@ -321,9 +321,6 @@ class PrimedGenerator(Runner):
sequences = torch.LongTensor(n_seqs, n_samples) # 64-bit int
frame_level_outputs = [None for _ in self.model.frame_level_rnns]
- out_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
- tmp_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
-
q_levels = self.model.q_levels
q_width = 64
q_min = int(q_levels / 2 - q_width / 2)
@@ -369,6 +366,8 @@ class PrimedGenerator(Runner):
}
sequences = sequence_lookup.get(primer, 'zero')(sequences)
+ out_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
+ tmp_sequences = torch.LongTensor(n_seqs, n_samples).fill_(utils.q_zero(self.model.q_levels))
# out_sequences = sequence_lookup.get(primer, 'zero')(out_sequences)
out_sequences[:, :self.model.lookback] = sequences[:, :self.model.lookback]
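With this hunk, the two working buffers are allocated only after the primer lookup has filled sequences, and the output buffer is seeded from the primer's lookback window. A minimal standalone sketch of that setup, with illustrative values for n_seqs, n_samples, lookback and q_levels, and with the midpoint level assumed as the behaviour of utils.q_zero:

    import torch

    # Illustrative values; in the model these come from configuration.
    n_seqs, n_samples, lookback, q_levels = 2, 16000, 1024, 256
    q_zero = q_levels // 2  # assumed behaviour of utils.q_zero: the midpoint level

    # Stand-in for the primed input sequences.
    sequences = torch.randint(0, q_levels, (n_seqs, n_samples))

    # Output and scratch buffers start at the zero level...
    out_sequences = torch.LongTensor(n_seqs, n_samples).fill_(q_zero)
    tmp_sequences = torch.LongTensor(n_seqs, n_samples).fill_(q_zero)

    # ...and the output is seeded with the primer's lookback window.
    out_sequences[:, :lookback] = sequences[:, :lookback]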
@@ -383,7 +382,7 @@ class PrimedGenerator(Runner):
pj = bottom_frame_size - n
for j in range(n):
ratio = (pj + j) / (bottom_frame_size-1)
- ratio *= ratio
+ # ratio *= ratio
a = sub_sequence_a[:, j].float() * (1-ratio)
b = sub_sequence_b[:, j].float() * ratio
tmp_sub_sequence[:, j] = torch.clamp(a + b, 0, q_levels).long()
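After this change the crossfade between the two source frames is linear; the squared ramp survives only as a comment. A self-contained sketch of the blending loop as it now reads (the function name and shapes are illustrative; sub_sequence_a and sub_sequence_b stand for (n_seqs, n) slices of quantized samples):

    import torch

    def crossfade_tail(sub_sequence_a, sub_sequence_b, bottom_frame_size, q_levels):
        # Blend the last n samples of frame A into frame B with a linear ramp.
        n = sub_sequence_a.size(1)
        pj = bottom_frame_size - n
        out = torch.empty_like(sub_sequence_a)
        for j in range(n):
            ratio = (pj + j) / (bottom_frame_size - 1)
            # ratio *= ratio  # quadratic ease-in, disabled by this commit
            a = sub_sequence_a[:, j].float() * (1 - ratio)
            b = sub_sequence_b[:, j].float() * ratio
            # Clamp bounds mirror the diff as written.
            out[:, j] = torch.clamp(a + b, 0, q_levels).long()
        return out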
@@ -399,8 +398,8 @@ class PrimedGenerator(Runner):
if i % rnn.n_frame_samples != 0:
continue
- sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
- # sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],
+ # sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
+ sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],
prev_samples = torch.autograd.Variable(
2 * utils.linear_dequantize(
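Note that the newly active assignment keeps the trailing comma from the commented-out version, which makes sub_sequence a one-element tuple rather than a tensor slice. Setting that aside, the dequantization of the previous frame's samples works roughly as below; linear_dequantize is an assumed stand-in for utils.linear_dequantize, whose body is not shown in this hunk, and the shapes and indices are placeholders:

    import torch

    def linear_dequantize(samples, q_levels):
        # Assumed stand-in for utils.linear_dequantize:
        # map integer levels [0, q_levels) to floats centred on zero.
        return samples.float() / (q_levels / 2) - 1.0

    # Placeholder values for illustration only.
    q_levels = 256
    out_sequences = torch.full((2, 4096), q_levels // 2, dtype=torch.long)
    i, n_frame_samples = 1024, 64

    sub_sequence = out_sequences[:, i - n_frame_samples : i]   # no trailing comma
    prev_samples = 2 * linear_dequantize(sub_sequence, q_levels)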
@@ -425,8 +424,8 @@ class PrimedGenerator(Runner):
rnn, prev_samples, upper_tier_conditioning
)
- # sub_sequence = get_sub_sequence(i, bottom_frame_size)
- sub_sequence = out_sequences[:, i-bottom_frame_size : i]
+ sub_sequence = get_sub_sequence(i, bottom_frame_size)
+ # sub_sequence = out_sequences[:, i-bottom_frame_size : i]
prev_samples = torch.autograd.Variable(
sub_sequence,