diff options
| author | Jules Laplace <julescarbon@gmail.com> | 2018-05-14 23:01:21 +0200 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-05-14 23:01:21 +0200 |
| commit | 390b95e3aabd9c660babb72a9d89dc1a99fc947b (patch) | |
| tree | a407c7f3c0e875da1d1e38760725b3fed037f3bd /model.py | |
| parent | 4d35f463da308f240b64b30dfe4f38dc0c15a486 (diff) | |
okayyyyyyy
Diffstat (limited to 'model.py')
| -rw-r--r-- | model.py | 7 |
1 file changed, 5 insertions, 2 deletions
@@ -385,8 +385,8 @@ class PrimedGenerator(Runner):
         print("ENTERING LOOP....")
         for i in range(self.model.lookback, self.model.lookback + seq_len):
-            if (i % 100) == 0:
-                print("{}...".format(i))
+            # if (i % 100) == 0:
+            print("{}...".format(i))
 
             for (tier_index, rnn) in \
                     reversed(list(enumerate(self.model.frame_level_rnns))):
@@ -417,7 +417,9 @@ class PrimedGenerator(Runner):
                 frame_level_outputs[tier_index] = self.run_rnn(
                     rnn, prev_samples, upper_tier_conditioning
                 )
+                print("ran rnn")
 
+            print("at bottom frame")
             sub_sequence = get_sub_sequence(i, bottom_frame_size)
             prev_samples = torch.autograd.Variable(
@@ -426,6 +428,7 @@ class PrimedGenerator(Runner):
             )
             if self.cuda:
                 prev_samples = prev_samples.cuda()
+            print "get upp"
             upper_tier_conditioning = \
                 frame_level_outputs[0][:, i % bottom_frame_size, :] \
                 .unsqueeze(1)
