From bb9d6b928e8ea2ead95cdad14e1b68dd870d16c1 Mon Sep 17 00:00:00 2001
From: Jules Laplace
Date: Mon, 14 May 2018 23:34:27 +0200
Subject: okayyyyyyy

---
 model.py | 32 +++-----------------------------
 1 file changed, 3 insertions(+), 29 deletions(-)

diff --git a/model.py b/model.py
index 8592ddc..0a3dc54 100644
--- a/model.py
+++ b/model.py
@@ -329,25 +329,18 @@ class PrimedGenerator(Runner):
         q_max = q_levels
 
         print("_______-___-_---_-____")
-        print("_____________--_-_-_______")
         print("INITTTTTTTT {}".format(primer))
-        if recursive:
-            print("RECURSIVE")
         print(sequences.shape)
         print("__________________--_-__--_________________")
         print("__-__________-_______________")
 
         def _noise(x):
             for i in range(n_samples):
-                if (i % 1000) == 0:
-                    print("{}...".format(i))
                 x[:, i] = int(random.triangular(q_min, q_max))
             return x
 
         def _sin(x):
             primer_freq = float(prime_param_a)
             for i in range(n_samples):
-                if (i % 1000) == 0:
-                    print("{}...".format(i))
                 x[:, i] = int((math.sin(i/44100 * primer_freq) + 1) / 2 * (q_max - q_min) + q_min)
             return x
@@ -357,19 +350,14 @@ class PrimedGenerator(Runner):
             'sin': _sin,
         }
 
-        print("BUILDING SEQUENCES")
-
         sequences = sequence_lookup.get(primer, 'zero')(sequences)
         # out_sequences = sequence_lookup.get(primer, 'zero')(out_sequences)
         out_sequences[:, :self.model.lookback] = sequences[:, :self.model.lookback]
 
-        print("BUILT....")
-
         # here we are generating the subsequence each time,
         # but maybe we want to generate the subsequence bottom_frame_size length
         # and then draw from that, although this will really emphasize the recursion
         def get_sub_sequence(i, n):
-            print("get subsequence {} {}".format(i, n))
             sub_sequence_a = sequences[:, i-n : i]  # generated
             sub_sequence_b = out_sequences[:, i-n : i]  # recursive
             tmp_sub_sequence = tmp_sequences[:, i-n : i]  # tmp/output
@@ -382,11 +370,7 @@ class PrimedGenerator(Runner):
 
             return tmp_sub_sequence
 
-        print("ENTERING LOOP....")
-
         for i in range(self.model.lookback, self.model.lookback + seq_len):
-            # if (i % 100) == 0:
-            print("{}...".format(i))
 
             for (tier_index, rnn) in \
                     reversed(list(enumerate(self.model.frame_level_rnns))):
@@ -417,11 +401,8 @@ class PrimedGenerator(Runner):
                 frame_level_outputs[tier_index] = self.run_rnn(
                     rnn, prev_samples, upper_tier_conditioning
                 )
-                print("ran rnn")
 
-            print("at bottom frame")
-            # sub_sequence = get_sub_sequence(i, bottom_frame_size)
-            sub_sequence = sequences[:, i-bottom_frame_size : i]
+            sub_sequence = get_sub_sequence(i, bottom_frame_size)
 
             prev_samples = torch.autograd.Variable(
                 sub_sequence,
                 volatile=True
             )
             if self.cuda:
                 prev_samples = prev_samples.cuda()
 
-            print("get upper tier conditioning.. {}".format(i % bottom_frame_size))
             upper_tier_conditioning = \
                 frame_level_outputs[0][:, i % bottom_frame_size, :] \
                 .unsqueeze(1)
-            print(upper_tier_conditioning.shape)
 
             sample_dist = self.model.sample_level_mlp(
                 prev_samples, upper_tier_conditioning
             ).squeeze(1).exp_().data
-            print(sample_dist.shape)
-            multi = sample_dist.multinomial(1)
-            print(multi.shape)
-            pred = multi.squeeze(1)
-            print(pred.shape)
-            print(out_sequences.shape)
-            out_sequences[:, i] = pred
+
+            out_sequences[:, i] = sample_dist.multinomial(1).squeeze(1)
 
         torch.backends.cudnn.enabled = True
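
A note on the primer dispatch this patch leaves in place:
sequence_lookup.get(primer, 'zero') falls back to the *string* 'zero'
rather than to a callable, so an unknown primer name would fail at the
call site with "'str' object is not callable". Below is a minimal
standalone sketch of the dispatch with a callable fallback; the _zero
helper, the tensor shape, and the parameter values are assumptions for
illustration, not code from model.py.

    import math
    import random

    import torch

    # Illustrative values; in model.py these come from the generator's
    # arguments (q_levels, n_samples, prime_param_a).
    q_min, q_max, n_samples = 0, 256, 16000
    prime_param_a = 440.0  # assumed primer frequency in Hz

    def _zero(x):
        # Hypothetical fallback primer: hold the middle quantization level.
        x.fill_((q_min + q_max) // 2)
        return x

    def _noise(x):
        # Triangular noise between q_min and q_max, as in the patch.
        for i in range(n_samples):
            x[:, i] = int(random.triangular(q_min, q_max))
        return x

    def _sin(x):
        # Map sin from [-1, 1] onto [q_min, q_max], assuming the 44.1 kHz
        # sample rate hard-coded in the patch.
        freq = float(prime_param_a)
        for i in range(n_samples):
            x[:, i] = int((math.sin(i / 44100 * freq) + 1) / 2
                          * (q_max - q_min) + q_min)
        return x

    sequence_lookup = {'zero': _zero, 'noise': _noise, 'sin': _sin}

    sequences = torch.zeros(1, n_samples).long()
    # Default to the callable _zero, not the string 'zero'.
    sequences = sequence_lookup.get('sin', _zero)(sequences)

In model.py itself the safe form would be
sequence_lookup.get(primer, _zero)(sequences).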
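The comment above get_sub_sequence records the open design question:
the conditioning window is rebuilt from scratch at every step, and
switching back to get_sub_sequence(i, bottom_frame_size) feeds the
model's own output into that window, which the comment predicts will
"really emphasize the recursion". The patch shows the three window
slices but not how they are combined; the sketch below only makes the
windowing concrete, with an explicitly invented mask-based switch
standing in for the combination the patch does not show.

    import torch

    def get_sub_sequence(sequences, out_sequences, i, n, recursive_mask=None):
        # The two slices mirror sub_sequence_a/b in the patch: each is a
        # (batch, n) window ending just before step i.
        sub_sequence_a = sequences[:, i - n:i]      # generated / primer
        sub_sequence_b = out_sequences[:, i - n:i]  # recursive / model output
        if recursive_mask is None:
            return sub_sequence_a
        # Invented blend: take the recursive samples where the mask is set.
        return torch.where(recursive_mask, sub_sequence_b, sub_sequence_a)

With recursive_mask=None this degenerates to the pre-patch behavior
(sequences[:, i-n : i]); an all-True mask gives pure feedback.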
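The last hunk folds the multi/pred temporaries into a single
expression. What that line computes: sample_level_mlp emits per-step
log-probabilities over the q_levels classes, exp_() turns them into
(unnormalized) probabilities in place, and multinomial(1) draws one
quantization level per batch row. A self-contained sketch of the draw,
assuming the MLP ends in log_softmax (the patch itself does not state
this) and using illustrative shapes:

    import torch
    import torch.nn.functional as F

    batch_size, q_levels = 4, 256  # illustrative, not from the patch

    # Stand-in for the sample_level_mlp output: (batch, 1, q_levels)
    # log-probabilities for the single step being generated.
    log_probs = F.log_softmax(torch.randn(batch_size, 1, q_levels), dim=-1)

    # squeeze(1) drops the length-1 step axis; exp() recovers
    # probabilities. multinomial renormalizes its input, so exact
    # normalization of the exp'd values is not required.
    sample_dist = log_probs.squeeze(1).exp()

    # One draw per batch row: (batch, 1) -> (batch,), matching the patched
    # out_sequences[:, i] = sample_dist.multinomial(1).squeeze(1)
    pred = sample_dist.multinomial(1).squeeze(1)
    print(pred.shape)  # torch.Size([4])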