| field | value | date |
|---|---|---|
| author | Jules Laplace <julescarbon@gmail.com> | 2018-05-14 23:34:27 +0200 |
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-05-14 23:34:27 +0200 |
| commit | bb9d6b928e8ea2ead95cdad14e1b68dd870d16c1 (patch) | |
| tree | 5204fffacf05ab15be4f6ba801aea113c30b9bc7 /model.py | |
| parent | 9a7ec713d189ceddedb07e9e86f152a71eec3f46 (diff) | |
okayyyyyyy
Diffstat (limited to 'model.py')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | model.py | 32 |

1 file changed, 3 insertions, 29 deletions
```diff
@@ -329,25 +329,18 @@ class PrimedGenerator(Runner):
             q_max = q_levels
 
         print("_______-___-_---_-____")
-        print("_____________--_-_-_______")
         print("INITTTTTTTT {}".format(primer))
-        if recursive:
-            print("RECURSIVE")
         print(sequences.shape)
         print("__________________--_-__--_________________")
         print("__-__________-_______________")
 
         def _noise(x):
             for i in range(n_samples):
-                if (i % 1000) == 0:
-                    print("{}...".format(i))
                 x[:, i] = int(random.triangular(q_min, q_max))
             return x
 
         def _sin(x):
             primer_freq = float(prime_param_a)
             for i in range(n_samples):
-                if (i % 1000) == 0:
-                    print("{}...".format(i))
                 x[:, i] = int((math.sin(i/44100 * primer_freq) + 1) / 2 * (q_max - q_min) + q_min)
             return x
@@ -357,19 +350,14 @@ class PrimedGenerator(Runner):
             'sin': _sin,
         }
 
-        print("BUILDING SEQUENCES")
-
         sequences = sequence_lookup.get(primer, 'zero')(sequences)
         # out_sequences = sequence_lookup.get(primer, 'zero')(out_sequences)
         out_sequences[:, :self.model.lookback] = sequences[:, :self.model.lookback]
 
-        print("BUILT....")
-
         # here we are generating the subsequence each time,
         # but maybe we want to generate the subsequence bottom_frame_size length
         # and then draw from that, although this will really emphasize the recursion
         def get_sub_sequence(i, n):
-            print("get subsequence {} {}".format(i, n))
             sub_sequence_a = sequences[:, i-n : i] # generated
             sub_sequence_b = out_sequences[:, i-n : i] # recursive
             tmp_sub_sequence = tmp_sequences[:, i-n : i] # tmp/output
@@ -382,11 +370,7 @@ class PrimedGenerator(Runner):
 
             return tmp_sub_sequence
 
-        print("ENTERING LOOP....")
-
         for i in range(self.model.lookback, self.model.lookback + seq_len):
-            # if (i % 100) == 0:
-            print("{}...".format(i))
 
             for (tier_index, rnn) in \
                     reversed(list(enumerate(self.model.frame_level_rnns))):
@@ -417,11 +401,8 @@ class PrimedGenerator(Runner):
                 frame_level_outputs[tier_index] = self.run_rnn(
                     rnn, prev_samples, upper_tier_conditioning
                 )
-                print("ran rnn")
 
-            print("at bottom frame")
-            # sub_sequence = get_sub_sequence(i, bottom_frame_size)
-            sub_sequence = sequences[:, i-bottom_frame_size : i]
+            sub_sequence = get_sub_sequence(i, bottom_frame_size)
 
             prev_samples = torch.autograd.Variable(
                 sub_sequence,
@@ -430,21 +411,14 @@ class PrimedGenerator(Runner):
             if self.cuda:
                 prev_samples = prev_samples.cuda()
 
-            print("get upper tier conditioning.. {}".format(i % bottom_frame_size))
             upper_tier_conditioning = \
                 frame_level_outputs[0][:, i % bottom_frame_size, :] \
                 .unsqueeze(1)
-            print(upper_tier_conditioning.shape)
 
             sample_dist = self.model.sample_level_mlp(
                 prev_samples, upper_tier_conditioning
             ).squeeze(1).exp_().data
-            print(sample_dist.shape)
-            multi = sample_dist.multinomial(1)
-            print(multi.shape)
-            pred = multi.squeeze(1)
-            print(pred.shape)
-            print(out_sequences.shape)
-            out_sequences[:, i] = pred
+
+            out_sequences[:, i] = sample_dist.multinomial(1).squeeze(1)
 
         torch.backends.cudnn.enabled = True
```
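For readers skimming the result rather than the hunks, here is a minimal, self-contained sketch of the two techniques the final code relies on: priming a sequence with a sine wave quantized into the `q_min`..`q_max` range (as `_sin` does above), and drawing one quantized sample per step from the sample-level MLP's output distribution via `multinomial`. The names and sizes below (`sine_primer`, `sample_step`, `sample_rate`, the batch and level counts) are hypothetical stand-ins, not this repository's API, and the sketch assumes the MLP emits log-probabilities, which the `.exp_()` call in the diff suggests.

```python
import math
import torch

def sine_primer(x, freq, q_min, q_max, sample_rate=44100):
    # Fill every row of x (batch, n_samples) with a sine wave mapped onto
    # the integer quantization range [q_min, q_max].
    n_samples = x.shape[1]
    for i in range(n_samples):
        # sin(...) lies in [-1, 1]; shift and scale it into [q_min, q_max].
        x[:, i] = int((math.sin(i / sample_rate * freq) + 1) / 2
                      * (q_max - q_min) + q_min)
    return x

def sample_step(log_probs):
    # Given log-probabilities over q_levels (batch, q_levels), draw one
    # quantization level per batch element -- the diff's final hunk in one line.
    return log_probs.exp().multinomial(1).squeeze(1)

# Hypothetical sizes, just to make the sketch runnable.
batch, n_samples, q_levels = 2, 8, 256
primed = sine_primer(torch.zeros(batch, n_samples, dtype=torch.long),
                     freq=440.0, q_min=0, q_max=q_levels - 1)
step = sample_step(torch.log_softmax(torch.randn(batch, q_levels), dim=1))
print(primed.shape, step.shape)  # (2, 8) and (2,)
```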
