ModelZoo / ResNet50_tensorflow, commit ebfc313f

Authored Oct 20, 2020 by Abdullah Rashwan
Committed by A. Unique TensorFlower on Oct 20, 2020

Internal change

PiperOrigin-RevId: 338094579
Parent: 43539545

Showing 4 changed files with 14 additions and 4 deletions (+14, -4):
  official/vision/beta/modeling/decoders/aspp.py        +4 -0
  official/vision/beta/modeling/decoders/aspp_test.py   +1 -0
  official/vision/beta/modeling/decoders/factory.py     +1 -0
  official/vision/keras_cv/layers/deeplab.py            +8 -4
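Taken together, the change threads a configurable `activation` argument through the ASPP decoder: the `official/vision` wrapper and its factory gain the parameter, and the underlying `keras_cv` SpatialPyramidPooling layer uses it in place of the previously hard-coded 'relu'. A minimal usage sketch; note that `level` and `dilation_rates` are assumed constructor arguments not shown in this diff, and only the keyword arguments visible in the hunks below are confirmed by it:

    import tensorflow as tf
    from official.vision.beta.modeling.decoders import aspp

    # Build the decoder with a non-default activation (new in this commit).
    decoder = aspp.ASPP(
        level=3,                     # assumed argument, not shown in the diff
        dilation_rates=[6, 12, 18],  # assumed argument, not shown in the diff
        activation='swish',          # previously fixed to 'relu'
        norm_momentum=0.99,
        norm_epsilon=0.001,
        dropout_rate=0.0,
        kernel_initializer='VarianceScaling',
        kernel_regularizer=None)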
official/vision/beta/modeling/decoders/aspp.py
@@ -31,6 +31,7 @@ class ASPP(tf.keras.layers.Layer):
                use_sync_bn=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
+               activation='relu',
                dropout_rate=0.0,
                kernel_initializer='VarianceScaling',
                kernel_regularizer=None,
@@ -46,6 +47,7 @@ class ASPP(tf.keras.layers.Layer):
       norm_momentum: `float` normalization momentum for the moving average.
       norm_epsilon: `float` small float added to variance to avoid dividing by
         zero.
+      activation: `str` activation to be used in ASPP.
       dropout_rate: `float` rate for dropout regularization.
       kernel_initializer: kernel_initializer for convolutional layers.
       kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
@@ -61,6 +63,7 @@ class ASPP(tf.keras.layers.Layer):
         'use_sync_bn': use_sync_bn,
         'norm_momentum': norm_momentum,
         'norm_epsilon': norm_epsilon,
+        'activation': activation,
         'dropout_rate': dropout_rate,
         'kernel_initializer': kernel_initializer,
         'kernel_regularizer': kernel_regularizer,
@@ -74,6 +77,7 @@ class ASPP(tf.keras.layers.Layer):
         use_sync_bn=self._config_dict['use_sync_bn'],
         batchnorm_momentum=self._config_dict['norm_momentum'],
         batchnorm_epsilon=self._config_dict['norm_epsilon'],
+        activation=self._config_dict['activation'],
         dropout=self._config_dict['dropout_rate'],
         kernel_initializer=self._config_dict['kernel_initializer'],
         kernel_regularizer=self._config_dict['kernel_regularizer'],
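The wrapper follows a config-dict pattern: every constructor argument is recorded in `self._config_dict` and then forwarded to the `keras_cv` layer under its names (`norm_momentum` becomes `batchnorm_momentum`, `dropout_rate` becomes `dropout`), so the new key has to be added in both places for construction and serialization to stay in sync. A minimal self-contained sketch of the pattern, using plain Keras rather than the Model Garden classes:

    import tensorflow as tf

    class ConfigDictLayer(tf.keras.layers.Layer):
      """Toy layer illustrating the config-dict pattern used by ASPP."""

      def __init__(self, activation='relu', dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        # One dict holds every constructor argument, so get_config() cannot
        # drift out of sync with __init__.
        self._config_dict = {
            'activation': activation,
            'dropout_rate': dropout_rate,
        }

      def get_config(self):
        config = super().get_config()
        config.update(self._config_dict)
        return config

    layer = ConfigDictLayer(activation='swish')
    restored = ConfigDictLayer.from_config(layer.get_config())
    assert restored._config_dict['activation'] == 'swish'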
official/vision/beta/modeling/decoders/aspp_test.py
@@ -64,6 +64,7 @@ class ASPPTest(parameterized.TestCase, tf.test.TestCase):
         use_sync_bn=False,
         norm_momentum=0.99,
         norm_epsilon=0.001,
+        activation='relu',
         kernel_initializer='VarianceScaling',
         kernel_regularizer=None,
         interpolation='bilinear',
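The test change only passes the new argument with its default value, so the existing assertions keep exercising the 'relu' path. A natural follow-up, hypothetical and not part of this commit, would parameterize over activation names; a minimal runnable sketch in the same absl/tf.test style:

    from absl.testing import parameterized
    import tensorflow as tf

    class ActivationSmokeTest(parameterized.TestCase, tf.test.TestCase):

      @parameterized.parameters('relu', 'swish', 'sigmoid')
      def test_activation_string_builds_layer(self, activation):
        # Each registered activation name should build and apply cleanly.
        layer = tf.keras.layers.Activation(activation)
        outputs = layer(tf.ones([1, 4]))
        self.assertAllEqual(outputs.shape, [1, 4])

    if __name__ == '__main__':
      tf.test.main()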
official/vision/beta/modeling/decoders/factory.py
@@ -61,6 +61,7 @@ def build_decoder(input_specs,
         use_sync_bn=norm_activation_config.use_sync_bn,
         norm_momentum=norm_activation_config.norm_momentum,
         norm_epsilon=norm_activation_config.norm_epsilon,
+        activation=norm_activation_config.activation,
         kernel_regularizer=l2_regularizer)
   else:
     raise ValueError('Decoder {!r} not implemented'.format(decoder_type))
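With this line, the decoder's activation follows the experiment's norm_activation config rather than silently defaulting to 'relu'. The real config class lives elsewhere in the Model Garden; as an illustration only, a stand-in with the four fields the factory reads here:

    import dataclasses

    @dataclasses.dataclass
    class NormActivation:
      # Stand-in for the Model Garden's norm_activation config; the field
      # names match what build_decoder reads in the hunk above.
      use_sync_bn: bool = False
      norm_momentum: float = 0.99
      norm_epsilon: float = 0.001
      activation: str = 'relu'

    norm_activation_config = NormActivation(activation='swish')
    # build_decoder(...) then forwards norm_activation_config.activation
    # into aspp.ASPP, as shown above.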
official/vision/keras_cv/layers/deeplab.py
@@ -33,6 +33,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
                use_sync_bn=False,
                batchnorm_momentum=0.99,
                batchnorm_epsilon=0.001,
+               activation='relu',
                dropout=0.5,
                kernel_initializer='glorot_uniform',
                kernel_regularizer=None,
@@ -48,6 +49,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
         0.99.
       batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
         0.001.
+      activation: A `str` for type of activation to be used. Defaults to 'relu'.
       dropout: A float for the dropout rate before output. Defaults to 0.5.
       kernel_initializer: Kernel initializer for conv layers. Defaults to
         `glorot_uniform`.
@@ -63,6 +65,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
     self.use_sync_bn = use_sync_bn
     self.batchnorm_momentum = batchnorm_momentum
     self.batchnorm_epsilon = batchnorm_epsilon
+    self.activation = activation
     self.dropout = dropout
     self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
     self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
@@ -96,7 +99,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
               axis=bn_axis,
               momentum=self.batchnorm_momentum,
               epsilon=self.batchnorm_epsilon),
-          tf.keras.layers.Activation('relu')
+          tf.keras.layers.Activation(self.activation)
       ])
     self.aspp_layers.append(conv_sequential)
@@ -109,7 +112,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
               dilation_rate=dilation_rate,
               use_bias=False),
           bn_op(axis=bn_axis,
                 momentum=self.batchnorm_momentum,
                 epsilon=self.batchnorm_epsilon),
-          tf.keras.layers.Activation('relu')])
+          tf.keras.layers.Activation(self.activation)])
       self.aspp_layers.append(conv_sequential)
     pool_sequential = tf.keras.Sequential([
@@ -124,7 +127,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
           axis=bn_axis,
           momentum=self.batchnorm_momentum,
           epsilon=self.batchnorm_epsilon),
-        tf.keras.layers.Activation('relu'),
+        tf.keras.layers.Activation(self.activation),
         tf.keras.layers.experimental.preprocessing.Resizing(
             height, width, interpolation=self.interpolation)])
     self.aspp_layers.append(pool_sequential)
@@ -139,7 +142,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
           axis=bn_axis,
           momentum=self.batchnorm_momentum,
           epsilon=self.batchnorm_epsilon),
-        tf.keras.layers.Activation('relu'),
+        tf.keras.layers.Activation(self.activation),
         tf.keras.layers.Dropout(rate=self.dropout)])

   def call(self, inputs, training=None):
@@ -159,6 +162,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
         'use_sync_bn': self.use_sync_bn,
         'batchnorm_momentum': self.batchnorm_momentum,
         'batchnorm_epsilon': self.batchnorm_epsilon,
+        'activation': self.activation,
         'dropout': self.dropout,
         'kernel_initializer': tf.keras.initializers.serialize(
             self.kernel_initializer),
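In build(), each hard-coded `tf.keras.layers.Activation('relu')` becomes `tf.keras.layers.Activation(self.activation)`. `Activation` resolves a registered string name (and also accepts a callable), and because `get_config()` now records the string, the layer round-trips through serialization with the chosen activation intact. A self-contained check of both behaviors, using only stock tf.keras APIs:

    import tensorflow as tf

    # Activation accepts a registered string name or a callable.
    for act in ('relu', 'swish', tf.nn.relu6):
      layer = tf.keras.layers.Activation(act)
      _ = layer(tf.constant([-1.0, 0.0, 1.0]))

    # The string form survives a get_config()/from_config() round trip,
    # which is why the commit also adds 'activation' to
    # SpatialPyramidPooling.get_config().
    layer = tf.keras.layers.Activation('swish')
    restored = tf.keras.layers.Activation.from_config(layer.get_config())
    print(restored.get_config()['activation'])  # -> 'swish'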