-rwxr-xr-x  gen-prime.sh | 2 +-
-rw-r--r--  model.py     | 8 ++++----
2 files changed, 5 insertions, 5 deletions
diff --git a/gen-prime.sh b/gen-prime.sh
index d5067c4..d9fa887 100755
--- a/gen-prime.sh
+++ b/gen-prime.sh
@@ -54,7 +54,7 @@ function gen_prime_set () {
     # gen_prime $1 6 44100 'noise'
     # gen_prime $1 6 44100 'sin' 440
     # gen_prime $1 6 44100 'noise' 0 0 True
-    gen_prime $1 6 44100 'sin' 440 0 True
+    gen_prime $1 6 22050 'sin' 440 0 True
 
     ./latest.pl -n 'fixed_gradient' -l $exp_name
 }
diff --git a/model.py b/model.py
--- a/model.py
+++ b/model.py
@@ -388,8 +388,8 @@ class PrimedGenerator(Runner):
             if i % rnn.n_frame_samples != 0:
                 continue
 
-            # sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
-            sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],
+            sub_sequence = get_sub_sequence(i, rnn.n_frame_samples)
+            # sub_sequence = out_sequences[:, i - rnn.n_frame_samples : i],
 
             prev_samples = torch.autograd.Variable(
                 2 * utils.linear_dequantize(
@@ -414,8 +414,8 @@ class PrimedGenerator(Runner):
                 rnn, prev_samples, upper_tier_conditioning
            )
 
-            sub_sequence = get_sub_sequence(i, bottom_frame_size)
-            # sub_sequence = out_sequences[:, i-bottom_frame_size : i]
+            # sub_sequence = get_sub_sequence(i, bottom_frame_size)
+            sub_sequence = out_sequences[:, i-bottom_frame_size : i]
 
             prev_samples = torch.autograd.Variable(
                 sub_sequence,
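
The model.py hunks toggle between two ways of fetching the samples generated immediately before position i: a get_sub_sequence helper (defined elsewhere in model.py, not shown in this diff) and a direct slice of the output buffer. Below is a minimal sketch of the slicing variant, assuming out_sequences is a 2-D LongTensor of shape (batch, length); get_prev_frame is a hypothetical stand-in for the repository's helper. It also shows why the line that is commented out in the first hunk is suspect: its trailing comma binds sub_sequence to a one-element tuple rather than a tensor.

import torch

def get_prev_frame(out_sequences, i, frame_size):
    # Select the frame_size samples immediately preceding position i
    # for every sequence in the batch: result has shape (batch, frame_size).
    return out_sequences[:, i - frame_size : i]

# Toy buffer standing in for the generator's accumulated output.
out_sequences = torch.zeros(2, 64, dtype=torch.long)

frame = get_prev_frame(out_sequences, i=16, frame_size=8)
print(frame.shape)         # torch.Size([2, 8])

# The variant with a trailing comma wraps the slice in a tuple.
sub_sequence = out_sequences[:, 16 - 8 : 16],
print(type(sub_sequence))  # <class 'tuple'>
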
