| | | |
|---|---|---|
| author | jules@lens <julescarbon@gmail.com> | 2018-04-24 20:19:24 +0200 |
| committer | jules@lens <julescarbon@gmail.com> | 2018-04-24 20:19:24 +0200 |
| commit | 83e91e0a8effcd20466e56b6ecc3e349bbfa5e0e | |
| tree | 377977c1068fa2411ac6b0e7c6da4aa97873ae8b /Code/d_model.py | |
| parent | 9b0d10f357871231bbec06c610363588812216e1 | |
Diffstat (limited to 'Code/d_model.py')
| | | |
|---|---|---|
| -rw-r--r-- | Code/d_model.py | 13 |

1 file changed, 9 insertions, 4 deletions
```diff
diff --git a/Code/d_model.py b/Code/d_model.py
index 1345ceb..02b882d 100644
--- a/Code/d_model.py
+++ b/Code/d_model.py
@@ -89,8 +89,8 @@ class DiscriminatorModel:
                 name='train_op')
 
             # add summaries to visualize in TensorBoard
-            loss_summary = tf.scalar_summary('loss_D', self.global_loss)
-            self.summaries = tf.merge_summary([loss_summary])
+            loss_summary = tf.summary.scalar('loss_D', self.global_loss)
+            self.summaries = tf.summary.merge([loss_summary])
 
     def build_feed_dict(self, input_frames, gt_output_frames, generator):
         """
@@ -123,14 +123,19 @@ class DiscriminatorModel:
         for scale_num in xrange(self.num_scale_nets):
             scale_net = self.scale_nets[scale_num]
 
+            broken = 0
             # resize gt_output_frames
             scaled_gt_output_frames = np.empty([batch_size, scale_net.height, scale_net.width, 3])
             for i, img in enumerate(gt_output_frames):
                 # for skimage.transform.resize, images need to be in range [0, 1], so normalize to
                 # [0, 1] before resize and back to [-1, 1] after
                 sknorm_img = (img / 2) + 0.5
-                resized_frame = resize(sknorm_img, [scale_net.height, scale_net.width, 3])
-                scaled_gt_output_frames[i] = (resized_frame - 0.5) * 2
+                try:
+                    resized_frame = resize(sknorm_img, [scale_net.height, scale_net.width, 3])
+                    scaled_gt_output_frames[i-broken] = (resized_frame - 0.5) * 2
+                except:
+                    broken += 1
+                    #print str(broken) + " " + "broken images"
 
             # combine with resized gt_output_frames to get inputs for prediction
             scaled_input_frames = np.concatenate([g_scale_preds[scale_num],
```
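The first hunk is the TensorFlow 1.0 summary API migration: `tf.scalar_summary` and `tf.merge_summary` moved under the `tf.summary` module. A minimal sketch of the updated calls in a TF 1.x graph, assuming a scalar loss tensor; the `FileWriter` setup and log directory are illustrative and not part of this commit:

```python
import tensorflow as tf  # TF 1.x graph-mode API

# Stand-in for self.global_loss; in the model this is the discriminator loss tensor.
global_loss = tf.constant(0.25, name='loss_D_value')

# Pre-1.0 API removed by the patch:
#   loss_summary = tf.scalar_summary('loss_D', global_loss)
#   summaries = tf.merge_summary([loss_summary])
# 1.0+ equivalents used in the patch:
loss_summary = tf.summary.scalar('loss_D', global_loss)
summaries = tf.summary.merge([loss_summary])

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./logs', sess.graph)  # illustrative log path
    writer.add_summary(sess.run(summaries), global_step=0)
    writer.close()
```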

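The second hunk makes the ground-truth resizing loop tolerant of frames that `skimage.transform.resize` cannot process: it still normalizes from [-1, 1] to [0, 1] before resizing and shifts back afterwards, but now counts broken frames and offsets the write index so that good frames stay packed at the front of the array. A standalone sketch of that logic, assuming frames arrive as a sequence of [-1, 1] arrays; the helper name `rescale_frames` and the final trim of never-written rows are illustrative additions, whereas the patched code writes into the preallocated `scaled_gt_output_frames` in place and leaves any trailing rows untouched:

```python
import numpy as np
from skimage.transform import resize

def rescale_frames(frames, height, width):
    """Resize frames in [-1, 1] to (height, width, 3), skipping frames that fail."""
    out = np.empty([len(frames), height, width, 3])
    broken = 0
    for i, img in enumerate(frames):
        sknorm_img = (img / 2) + 0.5                   # [-1, 1] -> [0, 1] for skimage
        try:
            resized = resize(sknorm_img, [height, width, 3])
            out[i - broken] = (resized - 0.5) * 2      # back to [-1, 1]
        except Exception:
            broken += 1                                # skip the broken frame
    return out[:len(frames) - broken]                  # drop rows that were never filled
```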