From 26dda685232f9f1e6ebe3ccf29b5a2436300e694 Mon Sep 17 00:00:00 2001
From: Michael Pivato
Date: Thu, 5 Aug 2021 08:20:31 +0000
Subject: [PATCH 1/3] Add Smooth Loss

---
 unsupervised/loss.py | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/unsupervised/loss.py b/unsupervised/loss.py
index d62e5b1..c4057a1 100644
--- a/unsupervised/loss.py
+++ b/unsupervised/loss.py
@@ -51,3 +51,34 @@ def make_combined_ssim_l1_loss(ssim_weight: int = 0.85, other_loss_fn=l1_loss):
        return ssim_weight * ssim + (1 - ssim_weight) * other_loss_fn(target_img, reprojected_img)

    return combined_ssim_loss
+
+
+# TODO: Consider other gradient methods for calculating smoothness, e.g. convolution methods such as Sobel
+def smooth_loss(depth, colour_image):
+    """
+    Calculate the edge-aware per-pixel smoothness loss on a depth map, with the colour image at the same resolution as the depth map.
+
+    Implements equation 3 of the monodepth2 paper:
+        L_s = |∂x d*_t| * e^(-|∂x I_t|) + |∂y d*_t| * e^(-|∂y I_t|)
+
+    :param depth: Tensor with shape (B, h, w, 1) - disparity, such as the depth map
+    :param colour_image: Tensor with shape (B, h, w, 3) - colour image, same resolution as the disparity map
+    :return: smooth loss
+    """
+    # Mean-normalised inverse depth
+    normalised_depth = depth / (tf.reduce_mean(depth, [1, 2], keepdims=True) + 1e-7)
+
+    # Nothing fancy here for gradients (follows sfmlearner/monodepth): shift by 1 pixel and
+    # take the absolute change (x/y shift left/up 1 pixel)
+    depth_gradient_x = tf.abs(normalised_depth[:, :-1, :, :] - normalised_depth[:, 1:, :, :])
+    depth_gradient_y = tf.abs(normalised_depth[:, :, :-1, :] - normalised_depth[:, :, 1:, :])
+
+    # Colour gradients make the loss edge-aware; monodepth 1/2 use these
+    image_gradient_x = tf.abs(colour_image[:, :-1, :, :] - colour_image[:, 1:, :, :])
+    image_gradient_y = tf.abs(colour_image[:, :, :-1, :] - colour_image[:, :, 1:, :])
+
+    # Average the 3 colour channels into a single channel so the weighting broadcasts against the depth gradients
+    smooth_x = depth_gradient_x * tf.exp(-tf.reduce_mean(image_gradient_x, 3, keepdims=True))
+    smooth_y = depth_gradient_y * tf.exp(-tf.reduce_mean(image_gradient_y, 3, keepdims=True))
+
+    return tf.reduce_mean(smooth_x) + tf.reduce_mean(smooth_y)
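Reviewer sketch (not part of the patches): a quick sanity check for the new smooth_loss, assuming TensorFlow 2.x and that loss.py is importable as unsupervised.loss; the shapes and values below are illustrative only.

import tensorflow as tf

from unsupervised.loss import smooth_loss  # assumed import path for the patched module

# Fake batch: depth maps (B, h, w, 1) and matching colour images (B, h, w, 3)
depth = tf.random.uniform((2, 32, 32, 1), minval=0.1, maxval=10.0)
colour = tf.random.uniform((2, 32, 32, 3))

print(float(smooth_loss(depth, colour)))                # small positive scalar
print(float(smooth_loss(tf.ones_like(depth), colour)))  # ~0.0: a constant depth map has no gradients to penalise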
From 58b8e53986643d2d9955cb054e06864afa3497e6 Mon Sep 17 00:00:00 2001
From: Piv <18462828+Piv200@users.noreply.github.com>
Date: Sat, 7 Aug 2021 21:00:17 +0930
Subject: [PATCH 2/3] Fix packnet residual block and layers, refactor to support a different number of residual layers

I noticed the number of parameters didn't match the paper (~128 million).
Fixed this by doing the following:
- Kernel size of 1 for the 3rd conv2d in the residual block
- Use add rather than concat in the residual block
- Fixed add/concat features in the decode layers
- Fixed the final layers -> this also allows features_3d == 16 to work
---
 packnet_functional.py | 47 ++++++++++++++++++++++++-------------------
 1 file changed, 26 insertions(+), 21 deletions(-)

diff --git a/packnet_functional.py b/packnet_functional.py
index c404c07..3929df6 100644
--- a/packnet_functional.py
+++ b/packnet_functional.py
@@ -22,10 +22,10 @@ def residual_layer(inputs, out_channels, stride, dropout=None):
    x = layers.Conv2D(out_channels, 3, padding='same', strides=stride)(inputs)
    x = layers.Conv2D(out_channels, 3, padding='same')(x)
    shortcut = layers.Conv2D(
-        out_channels, 3, padding='same', strides=stride)(inputs)
+        out_channels, 1, padding='same', strides=stride)(inputs)
    if dropout:
        shortcut = keras.layers.SpatialDropout2D(dropout)(shortcut)
-    x = keras.layers.Concatenate()([x, shortcut])
+    x = keras.layers.Add()([x, shortcut])
    x = group_norm.GroupNormalization(16)(x)
    return keras.layers.ELU()(x)
@@ -53,10 +53,10 @@ def packnet_inverse_depth(inputs, out_channels=1, min_depth=0.5):
 def pack_3d(inputs, kernel_size, r=2, features_3d=8):
    """
    Implementatino of the 3d packing block proposed here: https://arxiv.org/abs/1905.02693
-    :param inputs:
-    :param kernel_size:
-    :param r:
-    :param features_3d:
+    :param inputs: Tensor inputs
+    :param kernel_size: Conv3D kernel size
+    :param r: Packing factor
+    :param features_3d: Packing depth (increase to increase number of parameters and accuracy)
    :return:
    """
    # Data format for single image in nyu is HWC (space_to_depth uses NHWC as default)
@@ -78,7 +78,6 @@ def unpack_3d(inputs, out_channels, kernel_size, r=2, features_3d=8):
    return nn.depth_to_space(x, r)

-# TODO: Support different size packnet for scaling up/down
 # TODO: Support different channel format (right now we're supporting NHWC, we should also support NCHW)
 def make_packnet(shape=(224, 224, 3), skip_add=True, features_3d=4, dropout=None):
    """
@@ -109,42 +108,48 @@ def make_packnet(shape=(224, 224, 3), skip_add=True, features_3d=4, dropout=None
    # ================ ENCODER =================

    # ================ DECODER =================
-    # layer 7
-    x = unpack_3d(x, 512, 3, features_3d=features_3d)
+    # Addition requires we halve the outputs so there is a matching number of channels
+    divide_factor = (2 if skip_add else 1)
+    # layer 12 - 13
+    x = unpack_3d(x, 512 // divide_factor, 3, features_3d=features_3d)
    x = keras.layers.Add()(
        [x, skip_5]) if skip_add else keras.layers.Concatenate()([x, skip_5])
    x = packnet_conv2d(x, 512, 3, 1)
-    # layer 8
-    x = unpack_3d(x, 256, 3, features_3d=features_3d)
+    # layer 14 - 15
+    x = unpack_3d(x, 256 // divide_factor, 3, features_3d=features_3d)
    x = keras.layers.Add()(
        [x, skip_4]) if skip_add else keras.layers.Concatenate()([x, skip_4])
    x = packnet_conv2d(x, 256, 3, 1)
    layer_8 = x
-    # layer 9
+    # layer 16
    x = packnet_inverse_depth(x, 1)
-    # layer 10
-    u_layer_8 = unpack_3d(layer_8, 128, 3, features_3d=features_3d)
+    # layer 17 - 18
+    u_layer_8 = unpack_3d(layer_8, 128 // divide_factor, 3, features_3d=features_3d)
    x = keras.layers.UpSampling2D()(x)
    x = keras.layers.Add()([u_layer_8, skip_3, x]) if skip_add else keras.layers.Concatenate()([u_layer_8, skip_3, x])
    x = packnet_conv2d(x, 128, 3, 1)
    layer_10 = x
-    # layer 11
+    # layer 19
    x = packnet_inverse_depth(x, 1)
-    # layer 12
+    # layer 20 - 21
    u_layer_10 = unpack_3d(layer_10, 64, 3, features_3d=features_3d)
    x = keras.layers.UpSampling2D()(x)
    x = keras.layers.Add()([u_layer_10, skip_2, x]) if skip_add else keras.layers.Concatenate()([u_layer_10, skip_2, x])
    x = packnet_conv2d(x, 64, 3, 1)
    layer_12 = x
-    # layer 13
+    # layer 22
    x = packnet_inverse_depth(x)
-    # layer 14
-    u_layer_12 = unpack_3d(layer_12, 32, 3, features_3d=features_3d)
+    # layer 23 - 24
+    u_layer_12 = unpack_3d(layer_12, 64, 3, features_3d=features_3d)
    x = keras.layers.UpSampling2D()(x)
    x = keras.layers.Add()([u_layer_12, skip_1, x]) if skip_add else keras.layers.Concatenate()([u_layer_12, skip_1, x])
-    x = packnet_conv2d(x, 32, 3, 1)
-    # layer 15
+    x = packnet_conv2d(x, 64, 3, 1)
+    # layer 25
    x = packnet_inverse_depth(x)
    # ================ DECODER =================
    return keras.Model(inputs=input, outputs=x, name="PackNet")
+
+if __name__ == '__main__':
+    # This is the implementation used by the packnet sfm paper
+    make_packnet(features_3d=8, skip_add=False).summary()
\ No newline at end of file
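Reviewer sketch (not part of the patches) of why the shortcut kernel size dominates the residual block's parameter count; the input/output shapes below are arbitrary examples, not values taken from packnet_functional.py.

from tensorflow import keras
from tensorflow.keras import layers

inputs = keras.Input(shape=(56, 56, 64))

# 3x3 projection shortcut (what the block used before this patch)
shortcut_3x3 = layers.Conv2D(128, 3, padding='same', strides=2)(inputs)
# 1x1 projection shortcut (standard ResNet-style, as used after this patch)
shortcut_1x1 = layers.Conv2D(128, 1, padding='same', strides=2)(inputs)

print(keras.Model(inputs, shortcut_3x3).count_params())  # 73,856 = 64*3*3*128 + 128 biases
print(keras.Model(inputs, shortcut_1x1).count_params())  # 8,320  = 64*1*1*128 + 128 biases

Switching the merge from Concatenate to Add also matters: Add requires both branches to have the same channel count and leaves it unchanged, whereas Concatenate doubles the channels feeding every subsequent convolution.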
From 625ecba731b8e93a3055612d3d68f3220b0aa15e Mon Sep 17 00:00:00 2001
From: Michael Pivato
Date: Sun, 8 Aug 2021 09:25:55 +0000
Subject: [PATCH 3/3] Add small option to packnet, fix docs and first/final conv layers (previously 32)

---
 packnet_functional.py | 55 ++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 22 deletions(-)

diff --git a/packnet_functional.py b/packnet_functional.py
index 3929df6..f2f8765 100644
--- a/packnet_functional.py
+++ b/packnet_functional.py
@@ -52,7 +52,7 @@ def pack_3d(inputs, kernel_size, r=2, features_3d=8):
    """
-    Implementatino of the 3d packing block proposed here: https://arxiv.org/abs/1905.02693
+    Implementation of the 3d packing block proposed here: https://arxiv.org/abs/1905.02693
    :param inputs: Tensor inputs
    :param kernel_size: Conv3D kernel size
    :param r: Packing factor
    :param features_3d: Packing depth (increase to increase number of parameters and accuracy)
    :return:
    """
@@ -79,17 +79,22 @@
 # TODO: Support different channel format (right now we're supporting NHWC, we should also support NCHW)
-def make_packnet(shape=(224, 224, 3), skip_add=True, features_3d=4, dropout=None):
+def make_packnet(shape=(224, 224, 3), skip_add=False, features_3d=8, dropout=None, small=False):
    """
    Make the PackNet depth network.

    :param shape: Input shape of the image
    :param skip_add: Set to use add rather than concat skip connections, defaults to True
-    :return:
+    :param features_3d: Number of layers in 3D conv for packing/unpacking layers
+    :param dropout: Whether to build the model with dropout layers for regularisation. Useful during training only
+    :param small: Set True to omit the middle-most layer. Reduces params from ~128M -> ~34M
+        Further reductions can be achieved by using additive skip connections and fewer 3D features (down to a minimum of ~10M)
+    :return: Packnet Keras Model
    """
    # ================ ENCODER =================
    input = keras.layers.Input(shape=shape)
-    x = packnet_conv2d(input, 32, 5, 1)
+    initial_conv_channels = 32 if small else 64
+    x = packnet_conv2d(input, initial_conv_channels, 5, 1)
    skip_1 = x
    x = packnet_conv2d(x, 64, 7, 1)
    x = pack_3d(x, 5, features_3d=features_3d)
@@ -103,53 +108,59 @@ def make_packnet(shape=(224, 224, 3), skip_add=True, features_3d=4, dropout=None
    x = residual_block(x, 256, 3, 1, dropout)
    x = pack_3d(x, 3, features_3d=features_3d)
    skip_5 = x
-    x = residual_block(x, 512, 3, 1, dropout)
-    x = pack_3d(x, 3, features_3d=features_3d)
+    if not small:
+        x = residual_block(x, 512, 3, 1, dropout)
+        x = pack_3d(x, 3, features_3d=features_3d)
    # ================ ENCODER =================

    # ================ DECODER =================
    # Addition requires we halve the outputs so there is a matching number of channels
    divide_factor = (2 if skip_add else 1)
-    # layer 12 - 13
-    x = unpack_3d(x, 512 // divide_factor, 3, features_3d=features_3d)
-    x = keras.layers.Add()(
-        [x, skip_5]) if skip_add else keras.layers.Concatenate()([x, skip_5])
-    x = packnet_conv2d(x, 512, 3, 1)
-    # layer 14 - 15
+    # layer 7
+    if not small:
+        x = unpack_3d(x, 512 // divide_factor, 3, features_3d=features_3d)
+        x = keras.layers.Add()(
+            [x, skip_5]) if skip_add else keras.layers.Concatenate()([x, skip_5])
+        x = packnet_conv2d(x, 512, 3, 1)
+    # layer 8
    x = unpack_3d(x, 256 // divide_factor, 3, features_3d=features_3d)
    x = keras.layers.Add()(
        [x, skip_4]) if skip_add else keras.layers.Concatenate()([x, skip_4])
    x = packnet_conv2d(x, 256, 3, 1)
    layer_8 = x
-    # layer 16
+    # layer 9
    x = packnet_inverse_depth(x, 1)
-    # layer 17 - 18
+    # layer 10
    u_layer_8 = unpack_3d(layer_8, 128 // divide_factor, 3, features_3d=features_3d)
    x = keras.layers.UpSampling2D()(x)
    x = keras.layers.Add()([u_layer_8, skip_3, x]) if skip_add else keras.layers.Concatenate()([u_layer_8, skip_3, x])
    x = packnet_conv2d(x, 128, 3, 1)
    layer_10 = x
-    # layer 19
+    # layer 11
    x = packnet_inverse_depth(x, 1)
-    # layer 20 - 21
+    # layer 12
    u_layer_10 = unpack_3d(layer_10, 64, 3, features_3d=features_3d)
    x = keras.layers.UpSampling2D()(x)
    x = keras.layers.Add()([u_layer_10, skip_2, x]) if skip_add else keras.layers.Concatenate()([u_layer_10, skip_2, x])
    x = packnet_conv2d(x, 64, 3, 1)
    layer_12 = x
-    # layer 22
+    # layer 13
    x = packnet_inverse_depth(x)
-    # layer 23 - 24
-    u_layer_12 = unpack_3d(layer_12, 64, 3, features_3d=features_3d)
+    # layer 14
+    u_layer_12 = unpack_3d(layer_12, initial_conv_channels, 3, features_3d=features_3d)
    x = keras.layers.UpSampling2D()(x)
    x = keras.layers.Add()([u_layer_12, skip_1, x]) if skip_add else keras.layers.Concatenate()([u_layer_12, skip_1, x])
-    x = packnet_conv2d(x, 64, 3, 1)
-    # layer 25
+    x = packnet_conv2d(x, initial_conv_channels, 3, 1)
+    # layer 15
    x = packnet_inverse_depth(x)
    # ================ DECODER =================
    return keras.Model(inputs=input, outputs=x, name="PackNet")
+
 if __name__ == '__main__':
    # This is the implementation used by the packnet sfm paper
-    make_packnet(features_3d=8, skip_add=False).summary()
\ No newline at end of file
+    make_packnet().summary()
+
+    # This is the very small version of packnet
+    make_packnet(small=True, features_3d=1, skip_add=True).summary()
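Reviewer sketch (not part of the patches): a rough way to compare the configurations above. The ~128M / ~34M / ~10M figures come from the commit message and docstring and are approximate; exact counts depend on the TensorFlow/Keras version and on the group normalisation layers.

from packnet_functional import make_packnet

configs = {
    'paper (concat skips, features_3d=8)': make_packnet(),
    'small (middle-most layer removed)': make_packnet(small=True),
    'very small (add skips, features_3d=1)': make_packnet(small=True, features_3d=1, skip_add=True),
}
for name, model in configs.items():
    print(f'{name}: {model.count_params() / 1e6:.1f}M parameters')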