# fast-depth-tf/fast_depth_functional.py
"""
Unofficial TensorFlow Keras implementation of FastDepth (mobilenet_nnconv5).
Official PyTorch FastDepth implementation: https://github.com/dwofk/fast-depth
"""
import tensorflow as tf
import tensorflow.keras as keras

from load import load_nyu_evaluate
from metric import *
from util import crop_and_resize


# Ripped from:
# https://forums.developer.nvidia.com/t/could-not-create-cudnn-handle-cudnn-status-alloc-failed/108261/4?u=mpivato4
# This appears to be a Windows-specific issue, so explicitly enable GPU memory growth.
def fix_windows_gpu():
    """
    Works around a Windows GPU bug where cuDNN fails to allocate memory.
    """
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)


def nnconv5(inputs,
            out_channels,
            block_id=1,
            skip_connection=None):
    """
    FastDepth decoder block: 5x5 depthwise conv -> 1x1 pointwise conv -> 2x upsampling,
    with an optional additive skip connection from the MobileNet encoder.
    """
    x = keras.layers.DepthwiseConv2D(5, padding='same')(inputs)
    x = keras.layers.BatchNormalization()(x)
    x = keras.layers.ReLU(6.)(x)
    x = keras.layers.Conv2D(out_channels, 1, padding='same')(x)
    x = keras.layers.BatchNormalization()(x)
    x = keras.layers.UpSampling2D()(x)
    if skip_connection is not None:
        x = keras.layers.Add()([x, skip_connection])
    return keras.layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
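

# Decoder shape flow (illustrative sketch, assuming a 224x224x3 input): the MobileNet
# encoder ends at 7x7x1024, and each nnconv5 stage below halves the channel count
# while doubling the spatial resolution:
#   7x7x1024 -> 14x14x512 -> 28x28x256 -> 56x56x128 -> 112x112x64 -> 224x224x32
# before a final 1x1 convolution reduces the result to a single-channel depth map.
# A single stage can be shape-checked in isolation, e.g.:
#   f = keras.layers.Input(shape=(7, 7, 1024))    # encoder output for a 224x224 image
#   nnconv5(f, 512, block_id=99).shape            # -> (None, 14, 14, 512)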


def mobilenet_nnconv5(weights=None, shape=(224, 224, 3)):
    """
    Replication of the FastDepth model in TensorFlow, using the Keras functional API.

    :param weights: Pretrained weights for the MobileNet encoder, defaults to None
    :param shape: Input shape of the image, defaults to (224, 224, 3)
    :return: FastDepth keras Model
    """
    inputs = keras.layers.Input(shape=shape)
    mobilenet = keras.applications.MobileNet(
        input_shape=shape, input_tensor=inputs, include_top=False, weights=weights)
    for layer in mobilenet.layers:
        layer.trainable = True

    # FastDepth decoder: five nnconv5 stages with additive skip connections
    # from the corresponding MobileNet encoder activations.
    x = nnconv5(mobilenet.output, 512, block_id=14)
    x = nnconv5(x, 256, block_id=15, skip_connection=mobilenet.get_layer(
        name="conv_pw_5_relu").output)
    x = nnconv5(x, 128, block_id=16, skip_connection=mobilenet.get_layer(
        name="conv_pw_3_relu").output)
    x = nnconv5(x, 64, block_id=17, skip_connection=mobilenet.get_layer(
        name="conv_pw_1_relu").output)
    x = nnconv5(x, 32, block_id=18)

    # 1x1 pointwise head producing a single-channel depth map.
    x = keras.layers.Conv2D(1, 1, padding='same')(x)
    x = keras.layers.BatchNormalization()(x)
    x = keras.layers.ReLU(6.)(x)
    return keras.Model(inputs=inputs, outputs=x, name="fast_depth")
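

# Training sketch (illustrative only; the optimizer, learning rate, and loss below are
# assumptions, not the official FastDepth training recipe, and 'nyu_train' is a
# hypothetical tf.data.Dataset yielding (image, depth) pairs):
#
#   model = mobilenet_nnconv5(weights='imagenet')
#   model.compile(optimizer=keras.optimizers.Adam(1e-4), loss='mae')
#   model.fit(nyu_train, epochs=20)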


def evaluate(compiled_model, dataset=None):
    """
    Evaluate the model using the RMSE and delta1/2/3 metrics.

    :param compiled_model: Compiled, trained model to evaluate
    :param dataset: Dataset for evaluation. Should be of format {'image': image, 'depth': label},
                    where the label width/height matches the image width/height.
                    Defaults to the TensorFlow Datasets nyu_depth_v2 validation split
                    (https://www.tensorflow.org/datasets/catalog/nyu_depth_v2)
    """
    if dataset is None:
        dataset = load_nyu_evaluate()
    compiled_model.evaluate(dataset, verbose=1)
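

# Usage sketch (illustrative; assumes the metric module exposes Keras-compatible
# rmse and delta1/delta2/delta3 functions, and that trained weights are available
# locally as the hypothetical file 'fast_depth.h5'):
#
#   model = mobilenet_nnconv5()
#   model.load_weights('fast_depth.h5')
#   model.compile(loss='mae', metrics=[rmse, delta1, delta2, delta3])
#   evaluate(model)   # runs on the NYU Depth v2 validation split by default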


def forward(model, image):
    """
    Propagate a single image or a batch of images through the model. Image(s) should be in NHWC format.

    :param model: FastDepth keras Model to run inference with
    :param image: Input image tensor/array in NHWC format
    :return: Predicted depth map(s) for the cropped and resized input
    """
    return model(crop_and_resize(image))


if __name__ == '__main__':
    model = mobilenet_nnconv5()
    model.summary()
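
    # Smoke test (illustrative addition, not part of the original script): run a
    # random 224x224 RGB batch through the untrained model and print the output
    # shape. Predictions are meaningless without trained weights; this only checks
    # that the graph is wired correctly.
    dummy = tf.random.uniform((1, 224, 224, 3))
    pred = model(dummy)
    print("Predicted depth shape:", pred.shape)  # expected: (1, 224, 224, 1)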