Commit ebfc313f authored by Abdullah Rashwan, committed by A. Unique TensorFlower

Internal change

PiperOrigin-RevId: 338094579
parent 43539545
@@ -31,6 +31,7 @@ class ASPP(tf.keras.layers.Layer):
                use_sync_bn=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
+               activation='relu',
                dropout_rate=0.0,
                kernel_initializer='VarianceScaling',
                kernel_regularizer=None,
@@ -46,6 +47,7 @@ class ASPP(tf.keras.layers.Layer):
       norm_momentum: `float` normalization momentum for the moving average.
       norm_epsilon: `float` small float added to variance to avoid dividing by
         zero.
+      activation: `str` activation to be used in ASPP.
       dropout_rate: `float` rate for dropout regularization.
       kernel_initializer: kernel_initializer for convolutional layers.
       kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
@@ -61,6 +63,7 @@ class ASPP(tf.keras.layers.Layer):
         'use_sync_bn': use_sync_bn,
         'norm_momentum': norm_momentum,
         'norm_epsilon': norm_epsilon,
+        'activation': activation,
         'dropout_rate': dropout_rate,
         'kernel_initializer': kernel_initializer,
         'kernel_regularizer': kernel_regularizer,
@@ -74,6 +77,7 @@ class ASPP(tf.keras.layers.Layer):
         use_sync_bn=self._config_dict['use_sync_bn'],
         batchnorm_momentum=self._config_dict['norm_momentum'],
         batchnorm_epsilon=self._config_dict['norm_epsilon'],
+        activation=self._config_dict['activation'],
         dropout=self._config_dict['dropout_rate'],
         kernel_initializer=self._config_dict['kernel_initializer'],
         kernel_regularizer=self._config_dict['kernel_regularizer'],
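The net effect in the decoder is a new optional `activation` argument that defaults to 'relu', the previously hard-coded behavior. A minimal usage sketch, assuming the ASPP decoder is importable from the Model Garden vision package and consumes a dict of backbone endpoints keyed by level (the import path, level, and feature shape below are illustrative, not taken from this commit):

import tensorflow as tf
from official.vision.beta.modeling.decoders import aspp  # path is an assumption

# Build the decoder with a non-default activation; 'relu' remains the default.
decoder = aspp.ASPP(
    level=4,                     # hypothetical backbone output level
    dilation_rates=[6, 12, 18],
    activation='swish',          # any tf.keras activation identifier
    dropout_rate=0.1)

features = {'4': tf.random.uniform([2, 32, 32, 256])}  # {level: tensor}
outputs = decoder(features)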
@@ -64,6 +64,7 @@ class ASPPTest(parameterized.TestCase, tf.test.TestCase):
         use_sync_bn=False,
         norm_momentum=0.99,
         norm_epsilon=0.001,
+        activation='relu',
         kernel_initializer='VarianceScaling',
         kernel_regularizer=None,
         interpolation='bilinear',
@@ -61,6 +61,7 @@ def build_decoder(input_specs,
         use_sync_bn=norm_activation_config.use_sync_bn,
         norm_momentum=norm_activation_config.norm_momentum,
         norm_epsilon=norm_activation_config.norm_epsilon,
+        activation=norm_activation_config.activation,
         kernel_regularizer=l2_regularizer)
   else:
     raise ValueError('Decoder {!r} not implemented'.format(decoder_type))
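For context, build_decoder now forwards the activation from the shared norm_activation config, so switching the ASPP activation becomes a one-line config change. A hedged sketch of that plumbing; the dataclass below is a stand-in, but its field names mirror the norm_activation_config attributes referenced in the hunk above:

import dataclasses

@dataclasses.dataclass
class NormActivation:  # stand-in for the Model Garden config class
  activation: str = 'relu'
  use_sync_bn: bool = False
  norm_momentum: float = 0.99
  norm_epsilon: float = 0.001

norm_activation_config = NormActivation(activation='swish')
# build_decoder(...) would now pass activation='swish' through to ASPP.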
@@ -33,6 +33,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
                use_sync_bn=False,
                batchnorm_momentum=0.99,
                batchnorm_epsilon=0.001,
+               activation='relu',
                dropout=0.5,
                kernel_initializer='glorot_uniform',
                kernel_regularizer=None,
@@ -48,6 +49,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
         0.99.
       batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
         0.001.
+      activation: A `str` for type of activation to be used. Defaults to 'relu'.
       dropout: A float for the dropout rate before output. Defaults to 0.5.
       kernel_initializer: Kernel initializer for conv layers. Defaults to
         `glorot_uniform`.
@@ -63,6 +65,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
     self.use_sync_bn = use_sync_bn
     self.batchnorm_momentum = batchnorm_momentum
     self.batchnorm_epsilon = batchnorm_epsilon
+    self.activation = activation
     self.dropout = dropout
     self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
     self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
@@ -96,7 +99,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
             axis=bn_axis,
             momentum=self.batchnorm_momentum,
             epsilon=self.batchnorm_epsilon),
-        tf.keras.layers.Activation('relu')
+        tf.keras.layers.Activation(self.activation)
     ])
     self.aspp_layers.append(conv_sequential)
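Each ASPP branch is a bias-free Conv2D, then BatchNorm, then an activation, and the change makes that last step configurable instead of always 'relu'. A standalone sketch of the pattern, with names simplified from the diff:

import tensorflow as tf

def conv_bn_act(filters, kernel_size, activation='relu'):
  # One ASPP-style branch: bias-free conv, batch norm, then activation.
  return tf.keras.Sequential([
      tf.keras.layers.Conv2D(filters, kernel_size, padding='same',
                             use_bias=False),
      tf.keras.layers.BatchNormalization(momentum=0.99, epsilon=0.001),
      tf.keras.layers.Activation(activation),  # previously hard-coded 'relu'
  ])

branch = conv_bn_act(256, 3, activation='swish')
y = branch(tf.random.uniform([2, 32, 32, 64]), training=False)  # (2, 32, 32, 256)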
@@ -109,7 +112,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
              dilation_rate=dilation_rate, use_bias=False),
          bn_op(axis=bn_axis, momentum=self.batchnorm_momentum,
                epsilon=self.batchnorm_epsilon),
-         tf.keras.layers.Activation('relu')])
+         tf.keras.layers.Activation(self.activation)])
       self.aspp_layers.append(conv_sequential)
     pool_sequential = tf.keras.Sequential([
@@ -124,7 +127,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
             axis=bn_axis,
             momentum=self.batchnorm_momentum,
             epsilon=self.batchnorm_epsilon),
-        tf.keras.layers.Activation('relu'),
+        tf.keras.layers.Activation(self.activation),
         tf.keras.layers.experimental.preprocessing.Resizing(
             height, width, interpolation=self.interpolation)])
     self.aspp_layers.append(pool_sequential)
@@ -139,7 +142,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
             axis=bn_axis,
             momentum=self.batchnorm_momentum,
             epsilon=self.batchnorm_epsilon),
-        tf.keras.layers.Activation('relu'),
+        tf.keras.layers.Activation(self.activation),
         tf.keras.layers.Dropout(rate=self.dropout)])

   def call(self, inputs, training=None):
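Because tf.keras.layers.Activation resolves its argument through tf.keras.activations.get, any registered identifier (or a callable) now works where only 'relu' did before. A quick standalone illustration, not part of this commit:

import tensorflow as tf

x = tf.constant([[-1.0, 0.0, 2.0]])
print(tf.keras.layers.Activation('relu')(x).numpy())   # [[0. 0. 2.]]
print(tf.keras.layers.Activation('swish')(x).numpy())  # swish(x) = x * sigmoid(x)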
@@ -159,6 +162,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
         'use_sync_bn': self.use_sync_bn,
         'batchnorm_momentum': self.batchnorm_momentum,
         'batchnorm_epsilon': self.batchnorm_epsilon,
+        'activation': self.activation,
         'dropout': self.dropout,
         'kernel_initializer': tf.keras.initializers.serialize(
             self.kernel_initializer),
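With 'activation' included in get_config, the chosen activation survives layer serialization. A minimal round-trip check; the import path is an assumption, the required constructor arguments are inferred from the surrounding code, and 'tanh' is simply a visibly non-default choice:

from official.vision.beta.modeling.layers import nn_layers  # path is an assumption

spp = nn_layers.SpatialPyramidPooling(
    output_channels=256,
    dilation_rates=[6, 12, 18],
    activation='tanh')

config = spp.get_config()
assert config['activation'] == 'tanh'
restored = nn_layers.SpatialPyramidPooling.from_config(config)
assert restored.activation == 'tanh'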