Merge branch 'coreml-2' into 'main'
Coreml 2

See merge request vato007/fast-depth-tf!9
@@ -1,8 +1,8 @@
 import coremltools as ct
 
 
-def convert_coreml(model_path, save_path='../mobilenet_nnconv5.mlmodel'):
-    mlmodel = ct.convert(model_path)
+def convert_coreml(model_path, save_path='mobilenet_nnconv5.mlmodel'):
+    mlmodel = ct.convert(model_path, inputs=[ct.ImageType()])
     mlmodel.save(save_path)
 
 
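For context on the ct.ImageType() change: it tells coremltools to expose the model input as an image rather than a raw MLMultiArray, which is what an iOS camera pipeline feeds the model. A minimal sketch of how the converter might be invoked (the SavedModel path and the pixel scaling are assumptions, not taken from this commit):

    import coremltools as ct

    # Convert a hypothetical SavedModel directory; ImageType marks the input as an image.
    # scale is only an example of pixel normalisation, not something this diff sets.
    mlmodel = ct.convert('saved_models/fast_depth',
                         inputs=[ct.ImageType(scale=1 / 255.0)])
    mlmodel.save('mobilenet_nnconv5.mlmodel')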
@@ -1,5 +1,4 @@
 import tensorflow.keras as keras
-import tensorflow_datasets as tfds
 
 import fast_depth_functional as fd
 
@@ -12,12 +11,23 @@ def dense_upsample_block(input, out_channels, skip_connection):
     x = keras.layers.Concatenate()([x, skip_connection])
     x = keras.layers.Conv2D(filters=out_channels,
                             kernel_size=3, strides=1, padding='same')(x)
+    x = keras.layers.LeakyReLU(alpha=0.2)(x)
     x = keras.layers.Conv2D(filters=out_channels,
                             kernel_size=3, strides=1, padding='same')(x)
     return keras.layers.LeakyReLU(alpha=0.2)(x)
 
 
 def dense_depth(size, weights=None, shape=(224, 224, 3)):
+    """
+    Make the dense depth network graph using keras functional api.
+
+    Note that you should use the dense depth loss function, and use Adam as the optimiser with a learning rate of 0.0001
+    (default learning rate of Adam is 0.001).
+    :param size:
+    :param weights:
+    :param shape:
+    :return:
+    """
     input = keras.layers.Input(shape=shape)
     densenet = dense_net(input, size, weights, shape)
 
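The new docstring also pins down how the graph is meant to be trained. As a quick illustration of that guidance (module locations and the size argument value are assumptions, not lines from this repo):

    import tensorflow.keras as keras
    # dense_depth and dense_depth_loss_function imported from their modules (names assumed)

    model = dense_depth(121)  # size presumably selects the DenseNet variant
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001),
                  loss=dense_depth_loss_function)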
@@ -37,6 +47,8 @@ def dense_depth(size, weights=None, shape=(224, 224, 3)):
     decoder = dense_upsample_block(
         decoder, densenet_output_channels // 16, densenet.get_layer('conv1/relu').output)
 
+    decoder = dense_upsample_block(decoder, int(densenet_output_channels / 32), input)
+
     conv3 = keras.layers.Conv2D(
         filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(decoder)
     return keras.Model(inputs=input, outputs=conv3, name='dense_depth')
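For a sense of scale in the new decoder stage: if densenet_output_channels were 1024 (DenseNet-121's final feature width) and the earlier blocks follow the same halving pattern, the upsample blocks narrow towards 64 and now 32 channels before the single-channel conv3, with the raw input acting as the last skip connection. A tiny arithmetic check under that assumption:

    densenet_output_channels = 1024            # assumption: DenseNet-121 backbone
    print(densenet_output_channels // 16)      # 64, the penultimate block's width
    print(int(densenet_output_channels / 32))  # 32, the width of the newly added block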
@@ -72,6 +72,7 @@ def mobilenet_nnconv5(weights=None, shape=(224, 224, 3)):
     x = keras.layers.Conv2D(1, 1, padding='same')(x)
     x = keras.layers.BatchNormalization()(x)
     x = keras.layers.ReLU(6.)(x)
+    x = keras.layers.Reshape([shape[0], shape[1]])(x)
     return keras.Model(inputs=input, outputs=x, name="fast_depth")
 
 
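The added Reshape strips the trailing channel axis, so the model output becomes (batch, 224, 224) instead of (batch, 224, 224, 1) and lines up with depth targets stored without a channel dimension. A quick sanity check one could run (sketch only, assuming mobilenet_nnconv5 is importable):

    model = mobilenet_nnconv5()
    print(model.output_shape)  # expected: (None, 224, 224) after the new Reshape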
@@ -5,6 +5,10 @@ def dense_depth_loss_function(y, y_pred):
     """
     Implementation of the loss from the dense depth paper https://arxiv.org/pdf/1812.11941.pdf
     """
+    if len(y.shape) == 3:
+        y = tf.expand_dims(y, 3)
+    if len(y_pred.shape) == 3:
+        y_pred = tf.expand_dims(y_pred, 3)
     # Point-wise L1 loss
     l1_depth = tf.reduce_mean(tf.math.abs(y_pred - y), axis=-1)
 
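The new rank guard matters because tf.image.ssim and the image-gradient terms below expect 4-D (batch, height, width, channels) tensors, while targets and predictions coming out of the reshaped head are rank 3. A standalone illustration of the guard (shapes are illustrative):

    import tensorflow as tf

    y = tf.zeros([8, 224, 224])   # rank-3 depth-map batch, as the reshaped head produces
    if len(y.shape) == 3:
        y = tf.expand_dims(y, 3)  # -> (8, 224, 224, 1), the layout tf.image.ssim expects
    print(y.shape)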
@@ -15,6 +19,6 @@ def dense_depth_loss_function(y, y_pred):
         tf.math.abs(dx_pred - dx), axis=-1)
 
     # Structural Similarity (SSIM)
-    ssim = (1 - tf.image.ssim(y, y_pred, 500)) / 2
+    ssim = tf.clip_by_value((1 - tf.image.ssim(y, y_pred, 100)) / 2, 0, 1)
 
     return 0.1 * tf.reduce_mean(l1_depth) + tf.reduce_mean(gradient) + ssim
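Two things changed in the SSIM term: the third positional argument of tf.image.ssim is max_val, the assumed dynamic range of the depth maps, now 100 instead of 500, and the whole term is clipped into [0, 1] so numerical wobble cannot push it negative. Viewed on its own (a sketch, with max_depth exposed as a parameter for clarity):

    import tensorflow as tf

    def ssim_term(y, y_pred, max_depth=100.0):
        # tf.image.ssim returns values in roughly [-1, 1]; (1 - ssim) / 2 maps that to [0, 1].
        return tf.clip_by_value((1 - tf.image.ssim(y, y_pred, max_depth)) / 2, 0, 1)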
@@ -1,7 +1,7 @@
 import tensorflow as tf
 
 
-def delta1_metric(y_true, y_pred):
+def delta1(y_true, y_pred):
     max_ratio = tf.maximum(y_pred / y_true, y_true / y_pred)
     return tf.reduce_mean(tf.cast(max_ratio < tf.convert_to_tensor(1.25), tf.float32))
 
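For reference, delta1 is the standard depth-estimation accuracy: the fraction of pixels whose prediction is within a factor of 1.25 of the ground truth, with delta2 and delta3 conventionally widening the threshold to 1.25**2 and 1.25**3. A parameterised sketch of that family (not the repo's actual delta2/delta3 definitions):

    import tensorflow as tf

    def delta_metric(y_true, y_pred, threshold=1.25):
        # Fraction of pixels where max(pred/true, true/pred) stays under the threshold.
        max_ratio = tf.maximum(y_pred / y_true, y_true / y_pred)
        return tf.reduce_mean(tf.cast(max_ratio < threshold, tf.float32))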
train.py
@@ -19,7 +19,7 @@ def compile(model, optimiser=keras.optimizers.SGD(), loss=keras.losses.MeanSquar
                   loss=loss,
                   metrics=[keras.metrics.RootMeanSquaredError(),
                            keras.metrics.MeanSquaredError(),
-                           delta1_metric,
+                           delta1,
                            delta2,
                            delta3,
                            keras.metrics.MeanAbsolutePercentageError(),
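One practical side effect of the rename: Keras stores custom metric and loss functions by name, so a checkpoint saved with these metrics has to be reloaded with matching custom_objects keys. A hedged sketch (the checkpoint path and the module layout are assumptions):

    import tensorflow.keras as keras
    # delta1/delta2/delta3 and dense_depth_loss_function imported from their modules (names assumed)

    model = keras.models.load_model(
        'checkpoints/fast_depth.h5',  # hypothetical checkpoint path
        custom_objects={'delta1': delta1, 'delta2': delta2, 'delta3': delta3,
                        'dense_depth_loss_function': dense_depth_loss_function})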