From b5532f18c3b56d1715aef0a81820fd2ec7b05a70 Mon Sep 17 00:00:00 2001
From: Jules Laplace
Date: Tue, 15 May 2018 00:44:46 +0200
Subject: lol typo

---
 gen-prime.sh | 2 +-
 model.py     | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/gen-prime.sh b/gen-prime.sh
index d5067c4..d9fa887 100755
--- a/gen-prime.sh
+++ b/gen-prime.sh
@@ -54,7 +54,7 @@ function gen_prime_set () {
   # gen_prime $1 6 44100 'noise'
   # gen_prime $1 6 44100 'sin' 440
   # gen_prime $1 6 44100 'noise' 0 0 True
-  gen_prime $1 6 44100 'sin' 440 0 True
+  gen_prime $1 6 22050 'sin' 440 0 True

   ./latest.pl -n 'fixed_gradient' -l $exp_name
 }
diff --git a/model.py b/model.py
index 1f7a3fa..5e8a2ce 100644
--- a/model.py
+++ b/model.py
@@ -388,8 +388,8 @@ class PrimedGenerator(Runner):
             if i % rnn.n_frame_samples != 0:
                 continue

-            # sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
-            sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],
+            sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
+            # sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],

             prev_samples = torch.autograd.Variable(
                 2 * utils.linear_dequantize(
@@ -414,8 +414,8 @@ class PrimedGenerator(Runner):
                 rnn, prev_samples, upper_tier_conditioning
             )

-            sub_sequence = get_sub_sequence(i, bottom_frame_size)
-            # sub_sequence = out_sequences[:, i-bottom_frame_size : i]
+            # sub_sequence = get_sub_sequence(i, bottom_frame_size)
+            sub_sequence = out_sequences[:, i-bottom_frame_size : i]

             prev_samples = torch.autograd.Variable(
                 sub_sequence,
--
cgit v1.2.3-70-g09d2
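
Note: a minimal sketch of the slice-based windowing this patch switches to in the second model.py hunk, assuming out_sequences is a (batch, length) tensor of quantized samples; the helper name, shapes, and usage values below are illustrative assumptions, not the repo's actual get_sub_sequence API.

    import torch

    def previous_window(out_sequences, i, frame_size):
        # Take the frame_size quantized samples immediately preceding index i,
        # i.e. the window that conditions the next generation step.
        return out_sequences[:, i - frame_size : i]

    # Hypothetical usage: a batch of 2 sequences, 1024 samples each.
    out_sequences = torch.zeros(2, 1024, dtype=torch.long)
    window = previous_window(out_sequences, i=512, frame_size=16)
    print(window.shape)  # torch.Size([2, 16])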