Fix total loss calculation, add some more todos
@@ -55,6 +55,7 @@ class UnsupervisedPoseDepthLearner(keras.Model):
# It isn't too much of an issue right now since we're only using 2 images (left/right)
# For each depth output (scale), do the projective inverse warp on each input image and calculate the losses
# Only take the min loss between the two warped images (from monodepth2)
# TODO: Need to bilinearly resize the depth at each scale up to the size of the image
warp1 = warp.projective_inverse_warp(data.frames[0], depth[scale], pose1, data.intrinsics, coords)
warp2 = warp.projective_inverse_warp(data.frames[2], depth[scale], pose1, data.intrinsics, coords)
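One way to tackle the resize TODO above is to bilinearly upsample each scale's depth map to the input resolution before warping, similar to what monodepth2 does, so every scale is compared against the full-resolution target frame. A minimal sketch, assuming depth[scale] is a [batch, h, w, 1] tensor and data.frames[i] are [batch, H, W, 3] images (the helper name is made up here):

import tensorflow as tf

def upsample_depth_to_image(depth_at_scale, image):
    # Bilinearly resize a coarse depth map to the spatial size of the full-resolution frame
    target_size = tf.shape(image)[1:3]  # [H, W]
    return tf.image.resize(depth_at_scale, target_size, method='bilinear')

# e.g. depth_full = upsample_depth_to_image(depth[scale], data.frames[1])
#      warp1 = warp.projective_inverse_warp(data.frames[0], depth_full, pose1, data.intrinsics, coords)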
@@ -73,13 +74,17 @@ class UnsupervisedPoseDepthLearner(keras.Model):
tf.reduce_min(tf.concat([warp_loss1, warp_loss2, source_loss1, source_loss2], axis=3), axis=3))
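# Note (illustrative, not part of this commit): taking the per-pixel minimum over both
# warped losses *and* the unwarped source losses is monodepth2's minimum reprojection
# loss with auto-masking. Pixels that are already better explained by the unwarped
# source frame (static scenes, objects moving with the camera) pick the identity loss,
# which does not depend on the predicted depth or pose, so they are effectively masked
# out. The scalar loss is presumably the mean of that per-pixel minimum, e.g.:
#   reprojection_loss = tf.reduce_mean(
#       tf.reduce_min(tf.concat([warp_loss1, warp_loss2,
#                                source_loss1, source_loss2], axis=3), axis=3))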
# Calculate smooth losses
# TODO: Since the smooth loss is calculated directly on the depth at this scale, we need
# to resize the target image to the same dimensions as the depth map at the current scale.
# Can do this by just inspecting the shape of the depth and resizing to match that (but
# with 3 colour channels) (see the sketch below)
smooth_loss = loss.smooth_loss(depth[scale], data.frames[1])
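# Possible fix for the resize TODO above (a sketch, not part of this commit), assuming
# depth[scale] is [batch, h, w, 1] and data.frames[1] is the full-resolution target frame:
#   target_resized = tf.image.resize(data.frames[1], tf.shape(depth[scale])[1:3],
#                                    method='bilinear')
#   smooth_loss = loss.smooth_loss(depth[scale], target_resized)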
# TODO: Monodepth also divides the smooth loss by 2 ** scale, why?
# SfMLearner downscales the smoothing loss depending on the scale
smoothed_reprojection_loss = self.smoothness * smooth_loss / (2 ** scale)
# Add to the total loss (pixel reprojection loss plus the smoothness-weighted, scale-adjusted smooth loss)
-total_loss += reprojection_loss + smooth_loss
+total_loss += reprojection_loss + smoothed_reprojection_loss
pass
# Collect losses, average them out (divide by number of scales)
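With the per-scale losses accumulated into total_loss inside the loop as above, the averaging step is just a division by the number of scales. A minimal sketch, assuming depth is the Python list of per-scale depth outputs:

num_scales = len(depth)
total_loss = total_loss / float(num_scales)  # mean loss per scale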