Diffstat (limited to 'Codes/loss_functions.py')
-rw-r--r--  Codes/loss_functions.py  54
1 file changed, 54 insertions, 0 deletions
diff --git a/Codes/loss_functions.py b/Codes/loss_functions.py
new file mode 100644
index 0000000..ca97966
--- /dev/null
+++ b/Codes/loss_functions.py
@@ -0,0 +1,54 @@
+import tensorflow as tf
+import numpy as np
+
+
+def flow_loss(gen_flows, gt_flows):
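+ """
+ Calculates the mean absolute (l1) error between the predicted and ground truth flows.
+
+ @param gen_flows: A dict holding the predicted flow under the 'flow' key.
+ @param gt_flows: A dict holding the ground truth flow under the 'flow' key.
+
+ @return: The flow loss.
+ """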
+ return tf.reduce_mean(tf.abs(gen_flows['flow'] - gt_flows['flow']))
+
+
+def intensity_loss(gen_frames, gt_frames, l_num):
+ """
+ Calculates the mean lp loss between the predicted and ground truth frames.
+
+ @param gen_frames: The predicted frames at each scale.
+ @param gt_frames: The ground truth frames at each scale.
+ @param l_num: 1 or 2 for l1 and l2 loss, respectively.
+
+ @return: The lp loss.
+ """
+ return tf.reduce_mean(tf.abs((gen_frames - gt_frames) ** l_num))
+
+
+def gradient_loss(gen_frames, gt_frames, alpha):
+ """
+ Calculates the mean gradient difference loss (GDL) between the predicted and ground truth frames.
+
+ @param gen_frames: The predicted frames at each scale.
+ @param gt_frames: The ground truth frames at each scale.
+ @param alpha: The power to which each gradient term is raised.
+
+ @return: The GDL loss.
+ """
+ # create filters [-1, 1] and [[1], [-1]] for diffing to the left and down, respectively.
+ # the identity kernels below apply each difference to every channel independently.
+
+ channels = gen_frames.get_shape().as_list()[-1]
+ pos = tf.constant(np.identity(channels), dtype=tf.float32) # channels x channels identity
+ neg = -1 * pos
+ filter_x = tf.expand_dims(tf.stack([neg, pos]), 0) # [-1, 1]
+ filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)]) # [[1],[-1]]
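+ # filter_x has shape [1, 2, channels, channels] (horizontal differences);
+ # filter_y has shape [2, 1, channels, channels] (vertical differences).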
+ strides = [1, 1, 1, 1] # stride of (1, 1)
+ padding = 'SAME' # zero-pad so the gradient maps keep the input's spatial size
+
+ gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
+ gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
+ gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
+ gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
+
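+ # absolute difference between the gradient magnitudes of the ground truth and generated frames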
+ grad_diff_x = tf.abs(gt_dx - gen_dx)
+ grad_diff_y = tf.abs(gt_dy - gen_dy)
+
+ # condense into one tensor and avg
+ return tf.reduce_mean(grad_diff_x ** alpha + grad_diff_y ** alpha)
+
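+
+# Usage sketch (comment only): gen, gt, gen_flows, gt_flows and the lam_* weights below
+# are hypothetical placeholders, and the argument values are illustrative, not part of
+# this module. Combining the three losses into one training objective might look like:
+#   total_loss = lam_int * intensity_loss(gen, gt, l_num=2) \
+#                + lam_gd * gradient_loss(gen, gt, alpha=1) \
+#                + lam_flow * flow_loss(gen_flows, gt_flows)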