From 6917c6bd087daa5b3a140bdce0376cb1ea8d2cbc Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Mon, 14 May 2018 23:36:49 +0200 Subject: Add debug logging and recursive-flag check to PrimedGenerator sampling loop --- model.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'model.py') diff --git a/model.py b/model.py index 0a3dc54..12c0e05 100644 --- a/model.py +++ b/model.py @@ -329,7 +329,10 @@ class PrimedGenerator(Runner): q_max = q_levels print("_______-___-_---_-____") + print("_____________--_-_-_______") print("INITTTTTTTT {}".format(primer)) + if recursive: + print("RECURSIVE") print(sequences.shape) print("__________________--_-__--_________________") print("__-__________-_______________") @@ -403,6 +406,7 @@ class PrimedGenerator(Runner): ) sub_sequence = get_sub_sequence(i, bottom_frame_size) + # sub_sequence = sequences[:, i-bottom_frame_size : i] prev_samples = torch.autograd.Variable( sub_sequence, @@ -411,14 +415,21 @@ class PrimedGenerator(Runner): if self.cuda: prev_samples = prev_samples.cuda() + print("get upper tier conditioning.. {}".format(i % bottom_frame_size)) upper_tier_conditioning = \ frame_level_outputs[0][:, i % bottom_frame_size, :] \ .unsqueeze(1) + print(upper_tier_conditioning.shape) sample_dist = self.model.sample_level_mlp( prev_samples, upper_tier_conditioning ).squeeze(1).exp_().data - - out_sequences[:, i] = sample_dist.multinomial(1).squeeze(1) + print(sample_dist.shape) + multi = sample_dist.multinomial(1) + print(multi.shape) + pred = multi.squeeze(1) + print(pred.shape) + print(out_sequences.shape) + out_sequences[:, i] = pred torch.backends.cudnn.enabled = True -- cgit v1.2.3-70-g09d2