Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
790e49e5
Commit
790e49e5
authored
Mar 23, 2021
by
stephenwu
Browse files
Merge branch 'master' of
https://github.com/tensorflow/models
into run_superglue
parents
8ab018b0
5bb827c3
Changes
378
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
790 additions
and
146 deletions
+790
-146
official/vision/beta/modeling/backbones/resnet_deeplab_test.py
...ial/vision/beta/modeling/backbones/resnet_deeplab_test.py
+3
-3
official/vision/beta/modeling/backbones/resnet_test.py
official/vision/beta/modeling/backbones/resnet_test.py
+3
-3
official/vision/beta/modeling/backbones/revnet.py
official/vision/beta/modeling/backbones/revnet.py
+3
-3
official/vision/beta/modeling/backbones/revnet_test.py
official/vision/beta/modeling/backbones/revnet_test.py
+3
-3
official/vision/beta/modeling/backbones/spinenet.py
official/vision/beta/modeling/backbones/spinenet.py
+3
-3
official/vision/beta/modeling/backbones/spinenet_mobile.py
official/vision/beta/modeling/backbones/spinenet_mobile.py
+522
-0
official/vision/beta/modeling/backbones/spinenet_mobile_test.py
...al/vision/beta/modeling/backbones/spinenet_mobile_test.py
+111
-0
official/vision/beta/modeling/backbones/spinenet_test.py
official/vision/beta/modeling/backbones/spinenet_test.py
+3
-3
official/vision/beta/modeling/classification_model.py
official/vision/beta/modeling/classification_model.py
+27
-26
official/vision/beta/modeling/classification_model_test.py
official/vision/beta/modeling/classification_model_test.py
+3
-3
official/vision/beta/modeling/decoders/__init__.py
official/vision/beta/modeling/decoders/__init__.py
+3
-3
official/vision/beta/modeling/decoders/aspp.py
official/vision/beta/modeling/decoders/aspp.py
+33
-29
official/vision/beta/modeling/decoders/aspp_test.py
official/vision/beta/modeling/decoders/aspp_test.py
+3
-3
official/vision/beta/modeling/decoders/factory.py
official/vision/beta/modeling/decoders/factory.py
+9
-7
official/vision/beta/modeling/decoders/fpn.py
official/vision/beta/modeling/decoders/fpn.py
+26
-24
official/vision/beta/modeling/decoders/fpn_test.py
official/vision/beta/modeling/decoders/fpn_test.py
+3
-3
official/vision/beta/modeling/decoders/nasfpn.py
official/vision/beta/modeling/decoders/nasfpn.py
+25
-23
official/vision/beta/modeling/decoders/nasfpn_test.py
official/vision/beta/modeling/decoders/nasfpn_test.py
+3
-3
official/vision/beta/modeling/factory.py
official/vision/beta/modeling/factory.py
+2
-2
official/vision/beta/modeling/factory_3d.py
official/vision/beta/modeling/factory_3d.py
+2
-2
No files found.
official/vision/beta/modeling/backbones/resnet_deeplab_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for resnet_deeplab models."""
# Import libraries
...
...
official/vision/beta/modeling/backbones/resnet_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for resnet."""
# Import libraries
...
...
official/vision/beta/modeling/backbones/revnet.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================="""
# Lint as: python3
"""Contains definitions of RevNet."""
from
typing
import
Any
,
Callable
,
Dict
,
Optional
...
...
official/vision/beta/modeling/backbones/revnet_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for RevNet."""
# Import libraries
...
...
official/vision/beta/modeling/backbones/spinenet.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Contains definitions of SpineNet Networks."""
import
math
...
...
official/vision/beta/modeling/backbones/spinenet_mobile.py
0 → 100644
View file @
790e49e5
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions of Mobile SpineNet Networks."""
import
math
# Import libraries
from
absl
import
logging
import
tensorflow
as
tf
from
official.modeling
import
tf_utils
from
official.vision.beta.modeling.backbones
import
factory
from
official.vision.beta.modeling.layers
import
nn_blocks
from
official.vision.beta.modeling.layers
import
nn_layers
from
official.vision.beta.ops
import
spatial_transform_ops
# Short module-level alias for Keras layer constructors used below.
layers = tf.keras.layers

# Maps a block level to the base number of filters (channels) for blocks at
# that level.  The builder multiplies these by `filter_size_scale` (see
# SpineNetMobile.__init__ / SCALING_MAP) before use.
FILTER_SIZE_MAP = {
    0: 8,
    1: 16,
    2: 24,
    3: 40,
    4: 80,
    5: 112,
    6: 112,
    7: 112,
}
# The fixed SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
#   (block_level, block_fn, (input_offset0, input_offset1), is_output).
# `input_offset*` index into the running list of built blocks (the first two
# entries are the stem outputs); `is_output` marks blocks that feed the
# multilevel endpoints.
SPINENET_BLOCK_SPECS = [
    (2, 'mbconv', (0, 1), False),
    (2, 'mbconv', (1, 2), False),
    (4, 'mbconv', (1, 2), False),
    (3, 'mbconv', (3, 4), False),
    (4, 'mbconv', (3, 5), False),
    (6, 'mbconv', (4, 6), False),
    (4, 'mbconv', (4, 6), False),
    (5, 'mbconv', (7, 8), False),
    (7, 'mbconv', (7, 9), False),
    (5, 'mbconv', (9, 10), False),
    (5, 'mbconv', (9, 11), False),
    (4, 'mbconv', (6, 11), True),
    (3, 'mbconv', (5, 11), True),
    (5, 'mbconv', (8, 13), True),
    (7, 'mbconv', (6, 15), True),
    (6, 'mbconv', (13, 15), True),
]
# Per-variant scaling parameters for the Mobile SpineNet family, keyed by
# model id (as referenced from `build_spinenet_mobile`): endpoint feature
# dimension, channel multiplier applied to FILTER_SIZE_MAP, and the number of
# repeats per block group.
SCALING_MAP = {
    '49': {
        'endpoints_num_filters': 48,
        'filter_size_scale': 1.0,
        'block_repeats': 1,
    },
    '49S': {
        'endpoints_num_filters': 40,
        'filter_size_scale': 0.65,
        'block_repeats': 1,
    },
    '49XS': {
        'endpoints_num_filters': 24,
        'filter_size_scale': 0.6,
        'block_repeats': 1,
    },
}
class BlockSpec(object):
  """A container class that specifies the block configuration for SpineNet.

  Mirrors one entry of `SPINENET_BLOCK_SPECS`:

  Attributes:
    level: An `int` feature level of the block (keys `FILTER_SIZE_MAP`).
    block_fn: A `str` name of the block function (e.g. 'mbconv').
    input_offsets: A pair of `int` offsets into the list of previously built
      blocks that feed this block.
    is_output: A `bool`, whether this block feeds the output endpoints.
  """

  def __init__(self, level, block_fn, input_offsets, is_output):
    self.level = level
    self.block_fn = block_fn
    self.input_offsets = input_offsets
    self.is_output = is_output

  def __repr__(self):
    # Added for debuggability: without this, logging a list of specs (as
    # build_block_specs does) prints opaque object addresses.
    return ('BlockSpec(level={}, block_fn={!r}, input_offsets={}, '
            'is_output={})'.format(self.level, self.block_fn,
                                   self.input_offsets, self.is_output))
def build_block_specs(block_specs=None):
  """Builds the list of BlockSpec objects for SpineNet.

  Args:
    block_specs: An optional sequence of raw spec tuples; falls back to the
      fixed NAS-discovered `SPINENET_BLOCK_SPECS` when empty or None.

  Returns:
    A list of `BlockSpec` instances, one per raw spec tuple.
  """
  specs = block_specs if block_specs else SPINENET_BLOCK_SPECS
  logging.info('Building SpineNet block specs: %s', specs)
  built = []
  for spec in specs:
    built.append(BlockSpec(*spec))
  return built
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpineNetMobile(tf.keras.Model):
  """Creates a Mobile SpineNet family model.

  This implements:
  [1] Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Golnaz Ghiasi, Mingxing Tan,
  Yin Cui, Quoc V. Le, Xiaodan Song.
  SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization.
  (https://arxiv.org/abs/1912.05027).
  [2] Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Yin Cui, Mingxing Tan,
  Quoc Le, Xiaodan Song.
  Efficient Scale-Permuted Backbone with Learned Resource Distribution.
  (https://arxiv.org/abs/2010.11426).
  """

  def __init__(
      self,
      input_specs=tf.keras.layers.InputSpec(shape=[None, 512, 512, 3]),
      min_level=3,
      max_level=7,
      block_specs=build_block_specs(),
      endpoints_num_filters=48,
      se_ratio=0.2,
      block_repeats=1,
      filter_size_scale=1.0,
      expand_ratio=6,
      init_stochastic_depth_rate=0.0,
      kernel_initializer='VarianceScaling',
      kernel_regularizer=None,
      bias_regularizer=None,
      activation='relu',
      use_sync_bn=False,
      norm_momentum=0.99,
      norm_epsilon=0.001,
      **kwargs):
    """Initializes a Mobile SpineNet model.

    Args:
      input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
      min_level: An `int` of min level for output mutiscale features.
      max_level: An `int` of max level for output mutiscale features.
      block_specs: The block specifications for the SpineNet model discovered by
        NAS.
      endpoints_num_filters: An `int` of feature dimension for the output
        endpoints.
      se_ratio: A `float` of Squeeze-and-Excitation ratio.
      block_repeats: An `int` of number of blocks contained in the layer.
      filter_size_scale: A `float` of multiplier for the filters (number of
        channels) for all convolution ops. The value must be greater than zero.
        Typical usage will be to set this value in (0, 1) to reduce the number
        of parameters or computation cost of the model.
      expand_ratio: An `integer` of expansion ratios for inverted bottleneck
        blocks.
      init_stochastic_depth_rate: A `float` of initial stochastic depth rate.
      kernel_initializer: A str for kernel initializer of convolutional layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
        Default to None.
      activation: A `str` name of the activation function.
      use_sync_bn: If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A small `float` added to variance to avoid dividing by zero.
      **kwargs: Additional keyword arguments to be passed.
    """
    # NOTE: config is stashed on `self` before the functional super().__init__
    # call below; get_config() reads these attributes back.
    self._input_specs = input_specs
    self._min_level = min_level
    self._max_level = max_level
    self._block_specs = block_specs
    self._endpoints_num_filters = endpoints_num_filters
    self._se_ratio = se_ratio
    self._block_repeats = block_repeats
    self._filter_size_scale = filter_size_scale
    self._expand_ratio = expand_ratio
    self._init_stochastic_depth_rate = init_stochastic_depth_rate
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._activation = activation
    self._use_sync_bn = use_sync_bn
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    # Only 'relu' and 'swish' are supported; anything else fails fast here.
    if activation == 'relu':
      self._activation_fn = tf.nn.relu
    elif activation == 'swish':
      self._activation_fn = tf.nn.swish
    else:
      raise ValueError('Activation {} not implemented.'.format(activation))
    # Number of stem block groups built before the scale-permuted network.
    self._num_init_blocks = 2

    if use_sync_bn:
      self._norm = layers.experimental.SyncBatchNormalization
    else:
      self._norm = layers.BatchNormalization

    # BN axis follows the global image data format (channels last vs first).
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1

    # Build SpineNet: stem -> scale-permuted network -> shared-dim endpoints.
    inputs = tf.keras.Input(shape=input_specs.shape[1:])
    net = self._build_stem(inputs=inputs)
    # Width (shape[2]) is used to derive per-level target spatial sizes.
    net = self._build_scale_permuted_network(
        net=net, input_width=input_specs.shape[2])
    endpoints = self._build_endpoints(net=net)

    self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}

    super().__init__(inputs=inputs, outputs=endpoints)

  def _block_group(self,
                   inputs,
                   in_filters,
                   out_filters,
                   strides,
                   expand_ratio=6,
                   block_repeats=1,
                   se_ratio=0.2,
                   stochastic_depth_drop_rate=None,
                   name='block_group'):
    """Creates one group of blocks for the SpineNet model.

    Applies one strided InvertedBottleneckBlock, then `block_repeats - 1`
    additional stride-1 blocks.
    """
    x = nn_blocks.InvertedBottleneckBlock(
        in_filters=in_filters,
        out_filters=out_filters,
        strides=strides,
        se_ratio=se_ratio,
        expand_ratio=expand_ratio,
        stochastic_depth_drop_rate=stochastic_depth_drop_rate,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=self._activation,
        use_sync_bn=self._use_sync_bn,
        norm_momentum=self._norm_momentum,
        norm_epsilon=self._norm_epsilon)(inputs)
    for _ in range(1, block_repeats):
      # NOTE(review): each repeated block is applied to the original `inputs`,
      # not to `x`, so repeats overwrite rather than chain — looks like a bug
      # (harmless at the default block_repeats=1); confirm against upstream.
      x = nn_blocks.InvertedBottleneckBlock(
          in_filters=in_filters,
          out_filters=out_filters,
          strides=1,
          se_ratio=se_ratio,
          expand_ratio=expand_ratio,
          stochastic_depth_drop_rate=stochastic_depth_drop_rate,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=self._activation,
          use_sync_bn=self._use_sync_bn,
          norm_momentum=self._norm_momentum,
          norm_epsilon=self._norm_epsilon)(inputs)
    return tf.identity(x, name=name)

  def _build_stem(self, inputs):
    """Builds SpineNet stem.

    Returns a list with the outputs of the two initial level-2 block groups,
    which seed the scale-permuted network.
    """
    x = layers.Conv2D(
        filters=int(FILTER_SIZE_MAP[0] * self._filter_size_scale),
        kernel_size=3,
        strides=2,
        use_bias=False,
        padding='same',
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)(inputs)
    x = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)(x)
    x = tf_utils.get_activation(self._activation_fn)(x)

    net = []
    stem_strides = [1, 2]
    # Build the initial level 2 blocks.
    for i in range(self._num_init_blocks):
      x = self._block_group(
          inputs=x,
          in_filters=int(FILTER_SIZE_MAP[i] * self._filter_size_scale),
          out_filters=int(FILTER_SIZE_MAP[i + 1] * self._filter_size_scale),
          expand_ratio=self._expand_ratio,
          strides=stem_strides[i],
          se_ratio=self._se_ratio,
          block_repeats=self._block_repeats,
          name='stem_block_{}'.format(i + 1))
      net.append(x)
    return net

  def _build_scale_permuted_network(self, net, input_width,
                                    weighted_fusion=False):
    """Builds scale-permuted network.

    Args:
      net: A list of stem feature maps; grown in place as blocks are built.
      input_width: The input image width, used to derive per-level sizes.
      weighted_fusion: If True, fuses parents with learned (softmax-like)
        weights instead of a plain sum.

    Returns:
      A dict mapping str(level) -> output feature map for output blocks.
    """
    # Spatial widths of the stem outputs (levels 1 and 2).
    net_sizes = [
        int(math.ceil(input_width / 2)),
        int(math.ceil(input_width / 2**2))
    ]
    # Tracks how many downstream blocks consume each built block.
    num_outgoing_connections = [0] * len(net)

    endpoints = {}
    for i, block_spec in enumerate(self._block_specs):
      # Find out specs for the target block.
      target_width = int(math.ceil(input_width / 2**block_spec.level))
      target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *
                               self._filter_size_scale)

      # Resample then merge input0 and input1.
      parents = []
      input0 = block_spec.input_offsets[0]
      input1 = block_spec.input_offsets[1]

      x0 = self._resample_with_sepconv(
          inputs=net[input0],
          input_width=net_sizes[input0],
          target_width=target_width,
          target_num_filters=target_num_filters)
      parents.append(x0)
      num_outgoing_connections[input0] += 1

      x1 = self._resample_with_sepconv(
          inputs=net[input1],
          input_width=net_sizes[input1],
          target_width=target_width,
          target_num_filters=target_num_filters)
      parents.append(x1)
      num_outgoing_connections[input1] += 1

      # Merge 0 outdegree blocks to the output block.
      if block_spec.is_output:
        for j, (j_feat,
                j_connections) in enumerate(zip(net,
                                                num_outgoing_connections)):
          # Only adopt orphan blocks whose spatial size and channel count
          # already match the target — no extra resampling is done here.
          if j_connections == 0 and (j_feat.shape[2] == target_width and
                                     j_feat.shape[3] == x0.shape[3]):
            parents.append(j_feat)
            num_outgoing_connections[j] += 1

      # pylint: disable=g-direct-tensorflow-import
      if weighted_fusion:
        dtype = parents[0].dtype
        # One learned non-negative scalar per parent; normalized below.
        parent_weights = [
            tf.nn.relu(
                tf.cast(
                    tf.Variable(1.0, name='block{}_fusion{}'.format(i, j)),
                    dtype=dtype)) for j in range(len(parents))
        ]
        weights_sum = tf.add_n(parent_weights)
        parents = [
            parents[i] * parent_weights[i] / (weights_sum + 0.0001)
            for i in range(len(parents))
        ]

      # Fuse all parent nodes then build a new block.
      x = tf_utils.get_activation(self._activation_fn)(tf.add_n(parents))
      x = self._block_group(
          inputs=x,
          in_filters=target_num_filters,
          out_filters=target_num_filters,
          strides=1,
          se_ratio=self._se_ratio,
          expand_ratio=self._expand_ratio,
          block_repeats=self._block_repeats,
          stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
              self._init_stochastic_depth_rate, i + 1, len(self._block_specs)),
          name='scale_permuted_block_{}'.format(i + 1))

      net.append(x)
      net_sizes.append(target_width)
      num_outgoing_connections.append(0)

      # Save output feats.
      if block_spec.is_output:
        if block_spec.level in endpoints:
          raise ValueError('Duplicate feats found for output level {}.'.format(
              block_spec.level))
        if (block_spec.level < self._min_level or
            block_spec.level > self._max_level):
          raise ValueError('Output level is out of range [{}, {}]'.format(
              self._min_level, self._max_level))
        endpoints[str(block_spec.level)] = x

    return endpoints

  def _build_endpoints(self, net):
    """Matches filter size for endpoints before sharing conv layers."""
    endpoints = {}
    for level in range(self._min_level, self._max_level + 1):
      # 1x1 conv projects every output level to the shared endpoint dim.
      x = layers.Conv2D(
          filters=self._endpoints_num_filters,
          kernel_size=1,
          strides=1,
          use_bias=False,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer)(net[str(level)])
      x = self._norm(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)(x)
      x = tf_utils.get_activation(self._activation_fn)(x)
      endpoints[str(level)] = x
    return endpoints

  def _resample_with_sepconv(self, inputs, input_width, target_width,
                             target_num_filters):
    """Matches resolution and feature dimension.

    Downsamples with repeated stride-2 depthwise convs, or upsamples with
    nearest-neighbor; then projects channels with a 1x1 conv.
    """
    x = inputs
    # Spatial resampling.
    if input_width > target_width:
      while input_width > target_width:
        x = layers.DepthwiseConv2D(
            kernel_size=3,
            strides=2,
            padding='SAME',
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer)(x)
        x = self._norm(
            axis=self._bn_axis,
            momentum=self._norm_momentum,
            epsilon=self._norm_epsilon)(x)
        x = tf_utils.get_activation(self._activation_fn)(x)
        input_width /= 2
    elif input_width < target_width:
      scale = target_width // input_width
      x = spatial_transform_ops.nearest_upsampling(x, scale=scale)

    # Last 1x1 conv to match filter size.
    x = layers.Conv2D(
        filters=target_num_filters,
        kernel_size=1,
        strides=1,
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)(x)
    x = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)(x)
    return x

  def get_config(self):
    """Returns the constructor arguments needed to rebuild this model."""
    # NOTE: `input_specs` and `block_specs` are intentionally not serialized
    # here; from_config relies on their constructor defaults.
    config_dict = {
        'min_level': self._min_level,
        'max_level': self._max_level,
        'endpoints_num_filters': self._endpoints_num_filters,
        'se_ratio': self._se_ratio,
        'expand_ratio': self._expand_ratio,
        'block_repeats': self._block_repeats,
        'filter_size_scale': self._filter_size_scale,
        'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon
    }
    return config_dict

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Recreates a model from its `get_config()` dictionary."""
    return cls(**config)

  @property
  def output_specs(self):
    """A dict of {level: TensorShape} pairs for the model output."""
    return self._output_specs
@factory.register_backbone_builder('spinenet_mobile')
def build_spinenet_mobile(
    input_specs: tf.keras.layers.InputSpec,
    model_config,
    l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
  """Builds Mobile SpineNet backbone from a config.

  Args:
    input_specs: A `tf.keras.layers.InputSpec` of the model input.
    model_config: A model config providing `backbone`, `min_level`,
      `max_level` and `norm_activation` fields (project config object).
    l2_regularizer: Optional kernel regularizer applied to all convs.

  Returns:
    A `SpineNetMobile` keras model.

  Raises:
    ValueError: If `backbone_cfg.model_id` is not a key of `SCALING_MAP`.
  """
  backbone_type = model_config.backbone.type
  backbone_cfg = model_config.backbone.get()
  norm_activation_config = model_config.norm_activation
  # Registered under 'spinenet_mobile'; any other type indicates a wiring bug.
  assert backbone_type == 'spinenet_mobile', (f'Inconsistent backbone type '
                                              f'{backbone_type}')

  model_id = backbone_cfg.model_id
  if model_id not in SCALING_MAP:
    raise ValueError(
        'Mobile SpineNet-{} is not a valid architecture.'.format(model_id))
  # Per-variant scaling (endpoint dim, filter multiplier, block repeats).
  scaling_params = SCALING_MAP[model_id]

  return SpineNetMobile(
      input_specs=input_specs,
      min_level=model_config.min_level,
      max_level=model_config.max_level,
      endpoints_num_filters=scaling_params['endpoints_num_filters'],
      block_repeats=scaling_params['block_repeats'],
      filter_size_scale=scaling_params['filter_size_scale'],
      se_ratio=backbone_cfg.se_ratio,
      expand_ratio=backbone_cfg.expand_ratio,
      init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
      kernel_regularizer=l2_regularizer,
      activation=norm_activation_config.activation,
      use_sync_bn=norm_activation_config.use_sync_bn,
      norm_momentum=norm_activation_config.norm_momentum,
      norm_epsilon=norm_activation_config.norm_epsilon)
official/vision/beta/modeling/backbones/spinenet_mobile_test.py
0 → 100644
View file @
790e49e5
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SpineNet."""
# Import libraries
from
absl.testing
import
parameterized
import
tensorflow
as
tf
from
official.vision.beta.modeling.backbones
import
spinenet_mobile
class SpineNetMobileTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for the Mobile SpineNet backbone."""

  @parameterized.parameters(
      (128, 0.6, 1, 0.0, 24),
      (128, 0.65, 1, 0.2, 40),
      (256, 1.0, 1, 0.2, 48),
  )
  def test_network_creation(self, input_size, filter_size_scale, block_repeats,
                            se_ratio, endpoints_num_filters):
    """Test creation of SpineNet models."""
    min_level = 3
    max_level = 7
    tf.keras.backend.set_image_data_format('channels_last')

    input_specs = tf.keras.layers.InputSpec(
        shape=[None, input_size, input_size, 3])
    model = spinenet_mobile.SpineNetMobile(
        input_specs=input_specs,
        min_level=min_level,
        max_level=max_level,
        endpoints_num_filters=endpoints_num_filters,
        # Fixed: the constructor parameter is `se_ratio`. The previous
        # `resample_alpha=se_ratio` does not exist on SpineNetMobile and was
        # silently swallowed by **kwargs, so the parameterized se_ratio was
        # never exercised.
        se_ratio=se_ratio,
        block_repeats=block_repeats,
        filter_size_scale=filter_size_scale,
        init_stochastic_depth_rate=0.2,
    )

    inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
    endpoints = model(inputs)

    # Every level in [min_level, max_level] must be present with the expected
    # spatial size (input_size / 2**level) and endpoint channel count.
    for l in range(min_level, max_level + 1):
      self.assertIn(str(l), endpoints.keys())
      self.assertAllEqual(
          [1, input_size / 2**l, input_size / 2**l, endpoints_num_filters],
          endpoints[str(l)].shape.as_list())

  def test_serialize_deserialize(self):
    # Create a network object that sets all of its config options.
    kwargs = dict(
        min_level=3,
        max_level=7,
        endpoints_num_filters=256,
        se_ratio=0.2,
        expand_ratio=6,
        block_repeats=1,
        filter_size_scale=1.0,
        init_stochastic_depth_rate=0.2,
        use_sync_bn=False,
        activation='relu',
        norm_momentum=0.99,
        norm_epsilon=0.001,
        kernel_initializer='VarianceScaling',
        kernel_regularizer=None,
        bias_regularizer=None,
    )
    network = spinenet_mobile.SpineNetMobile(**kwargs)

    expected_config = dict(kwargs)
    self.assertEqual(network.get_config(), expected_config)

    # Create another network object from the first object's config.
    new_network = spinenet_mobile.SpineNetMobile.from_config(
        network.get_config())

    # Validate that the config can be forced to JSON.
    _ = new_network.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(network.get_config(), new_network.get_config())
# Standard TF test entry point: runs all TestCase classes in this module.
if __name__ == '__main__':
  tf.test.main()
official/vision/beta/modeling/backbones/spinenet_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for SpineNet."""
# Import libraries
from
absl.testing
import
parameterized
...
...
official/vision/beta/modeling/classification_model.py
View file @
790e49e5
# Copyright 202
0
The TensorFlow Authors. All Rights Reserved.
# Copyright 202
1
The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build classification models."""
# Import libraries
...
...
@@ -59,28 +59,10 @@ class ClassificationModel(tf.keras.Model):
skip_logits_layer: `bool`, whether to skip the prediction layer.
**kwargs: keyword arguments to be passed.
"""
self
.
_self_setattr_tracking
=
False
self
.
_config_dict
=
{
'backbone'
:
backbone
,
'num_classes'
:
num_classes
,
'input_specs'
:
input_specs
,
'dropout_rate'
:
dropout_rate
,
'kernel_initializer'
:
kernel_initializer
,
'kernel_regularizer'
:
kernel_regularizer
,
'bias_regularizer'
:
bias_regularizer
,
'add_head_batch_norm'
:
add_head_batch_norm
,
'use_sync_bn'
:
use_sync_bn
,
'norm_momentum'
:
norm_momentum
,
'norm_epsilon'
:
norm_epsilon
,
}
self
.
_input_specs
=
input_specs
self
.
_kernel_regularizer
=
kernel_regularizer
self
.
_bias_regularizer
=
bias_regularizer
self
.
_backbone
=
backbone
if
use_sync_bn
:
self
.
_
norm
=
tf
.
keras
.
layers
.
experimental
.
SyncBatchNormalization
norm
=
tf
.
keras
.
layers
.
experimental
.
SyncBatchNormalization
else
:
self
.
_
norm
=
tf
.
keras
.
layers
.
BatchNormalization
norm
=
tf
.
keras
.
layers
.
BatchNormalization
axis
=
-
1
if
tf
.
keras
.
backend
.
image_data_format
()
==
'channels_last'
else
1
inputs
=
tf
.
keras
.
Input
(
shape
=
input_specs
.
shape
[
1
:])
...
...
@@ -88,18 +70,37 @@ class ClassificationModel(tf.keras.Model):
x
=
endpoints
[
max
(
endpoints
.
keys
())]
if
add_head_batch_norm
:
x
=
self
.
_
norm
(
axis
=
axis
,
momentum
=
norm_momentum
,
epsilon
=
norm_epsilon
)(
x
)
x
=
norm
(
axis
=
axis
,
momentum
=
norm_momentum
,
epsilon
=
norm_epsilon
)(
x
)
x
=
tf
.
keras
.
layers
.
GlobalAveragePooling2D
()(
x
)
if
not
skip_logits_layer
:
x
=
tf
.
keras
.
layers
.
Dropout
(
dropout_rate
)(
x
)
x
=
tf
.
keras
.
layers
.
Dense
(
num_classes
,
kernel_initializer
=
kernel_initializer
,
kernel_regularizer
=
self
.
_kernel_regularizer
,
bias_regularizer
=
self
.
_bias_regularizer
)(
num_classes
,
kernel_initializer
=
kernel_initializer
,
kernel_regularizer
=
kernel_regularizer
,
bias_regularizer
=
bias_regularizer
)(
x
)
super
(
ClassificationModel
,
self
).
__init__
(
inputs
=
inputs
,
outputs
=
x
,
**
kwargs
)
self
.
_config_dict
=
{
'backbone'
:
backbone
,
'num_classes'
:
num_classes
,
'input_specs'
:
input_specs
,
'dropout_rate'
:
dropout_rate
,
'kernel_initializer'
:
kernel_initializer
,
'kernel_regularizer'
:
kernel_regularizer
,
'bias_regularizer'
:
bias_regularizer
,
'add_head_batch_norm'
:
add_head_batch_norm
,
'use_sync_bn'
:
use_sync_bn
,
'norm_momentum'
:
norm_momentum
,
'norm_epsilon'
:
norm_epsilon
,
}
self
.
_input_specs
=
input_specs
self
.
_kernel_regularizer
=
kernel_regularizer
self
.
_bias_regularizer
=
bias_regularizer
self
.
_backbone
=
backbone
self
.
_norm
=
norm
@
property
def
checkpoint_items
(
self
):
...
...
official/vision/beta/modeling/classification_model_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for classification network."""
# Import libraries
...
...
official/vision/beta/modeling/decoders/__init__.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Decoders package definition."""
from
official.vision.beta.modeling.decoders.aspp
import
ASPP
...
...
official/vision/beta/modeling/decoders/aspp.py
View file @
790e49e5
# Copyright 202
0
The TensorFlow Authors. All Rights Reserved.
# Copyright 202
1
The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -11,8 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ASPP decoder."""
"""
Contains definitions of Atrous Spatial Pyramid Pooling (
ASPP
)
decoder."""
# Import libraries
import
tensorflow
as
tf
...
...
@@ -22,7 +22,7 @@ from official.vision import keras_cv
@
tf
.
keras
.
utils
.
register_keras_serializable
(
package
=
'Vision'
)
class
ASPP
(
tf
.
keras
.
layers
.
Layer
):
"""
ASPP
."""
"""
Creates an Atrous Spatial Pyramid Pooling (ASPP) layer
."""
def
__init__
(
self
,
level
,
...
...
@@ -38,26 +38,28 @@ class ASPP(tf.keras.layers.Layer):
kernel_regularizer
=
None
,
interpolation
=
'bilinear'
,
**
kwargs
):
"""
ASPP i
nitializ
ation function
.
"""
I
nitializ
es an Atrous Spatial Pyramid Pooling (ASPP) layer
.
Args:
level: `int` level to apply ASPP.
dilation_rates: `list` of dilation rates.
num_filters: `int` number of output filters in ASPP.
pool_kernel_size: `list` of [height, width] of pooling kernel size or
level:
An
`int` level to apply ASPP.
dilation_rates:
A
`list` of dilation rates.
num_filters:
An
`int` number of output filters in ASPP.
pool_kernel_size:
A
`list` of [height, width] of pooling kernel size or
None. Pooling size is with respect to original image size, it will be
scaled down by 2**level. If None, global average pooling is used.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
activation: `str` activation to be used in ASPP.
dropout_rate: `float` rate for dropout regularization.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
interpolation: interpolation method, one of bilinear, nearest, bicubic,
area, lanczos3, lanczos5, gaussian, or mitchellcubic.
**kwargs: keyword arguments to be passed.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
activation: A `str` activation to be used in ASPP.
dropout_rate: A `float` rate for dropout regularization.
kernel_initializer: A `str` name of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
interpolation: A `str` of interpolation method. It should be one of
`bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, or `mitchellcubic`.
**kwargs: Additional keyword arguments to be passed.
"""
super
(
ASPP
,
self
).
__init__
(
**
kwargs
)
self
.
_config_dict
=
{
...
...
@@ -96,20 +98,22 @@ class ASPP(tf.keras.layers.Layer):
interpolation
=
self
.
_config_dict
[
'interpolation'
])
def
call
(
self
,
inputs
):
"""
ASPP call method
.
"""
Calls the Atrous Spatial Pyramid Pooling (ASPP) layer on an input
.
The output of ASPP will be a dict of level, Tensor even if only one
The output of ASPP will be a dict of
{`
level
`
,
`tf.
Tensor
`}
even if only one
level is present. Hence, this will be compatible with the rest of the
segmentation model interfaces.
.
segmentation model interfaces.
Args:
inputs: A dict of tensors
- key: `str`, the level of the multilevel feature maps.
- values: `Tensor`, [batch, height_l, width_l, filter_size].
inputs: A `dict` of `tf.Tensor` where
- key: A `str` of the level of the multilevel feature maps.
- values: A `tf.Tensor` of shape [batch, height_l, width_l,
filter_size].
Returns:
A dict of
tensors
- key: `str`
,
the level of the multilevel feature maps.
- values:
`
Tensor`
,
output of ASPP module.
A
`
dict
`
of
`tf.Tensor` where
- key:
A
`str`
of
the level of the multilevel feature maps.
- values:
A `tf.
Tensor`
of
output of ASPP module.
"""
outputs
=
{}
level
=
str
(
self
.
_config_dict
[
'level'
])
...
...
official/vision/beta/modeling/decoders/aspp_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for aspp."""
# Import libraries
...
...
official/vision/beta/modeling/decoders/factory.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,8 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""factory method."""
# Lint as: python3
"""Contains the factory method to create decoders."""
# Import libraries
import
tensorflow
as
tf
...
...
@@ -26,13 +27,14 @@ def build_decoder(input_specs,
"""Builds decoder from a config.
Args:
input_specs: `dict` input specifications. A dictionary consists of
input_specs:
A
`dict`
of
input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A OneOfConfig. Model config.
l2_regularizer: tf.keras.regularizers.Regularizer instance. Default to None.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
Returns:
tf.keras.Model instance of the decoder.
A `
tf.keras.Model
`
instance of the decoder.
"""
decoder_type
=
model_config
.
decoder
.
type
decoder_cfg
=
model_config
.
decoder
.
get
()
...
...
official/vision/beta/modeling/decoders/fpn.py
View file @
790e49e5
# Copyright 202
0
The TensorFlow Authors. All Rights Reserved.
# Copyright 202
1
The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -11,14 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Feature Pyramid Networks.
Feature Pyramid Networks were proposed in:
[1] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan,
and Serge Belongie
Feature Pyramid Networks for Object Detection. CVPR 2017.
"""
"""Contains the definitions of Feature Pyramid Networks (FPN)."""
# Import libraries
import
tensorflow
as
tf
...
...
@@ -29,7 +23,14 @@ from official.vision.beta.ops import spatial_transform_ops
@
tf
.
keras
.
utils
.
register_keras_serializable
(
package
=
'Vision'
)
class
FPN
(
tf
.
keras
.
Model
):
"""Feature pyramid network."""
"""Creates a Feature Pyramid Network (FPN).
This implements the paper:
Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and
Serge Belongie.
Feature Pyramid Networks for Object Detection.
(https://arxiv.org/pdf/1612.03144)
"""
def
__init__
(
self
,
input_specs
,
...
...
@@ -45,25 +46,26 @@ class FPN(tf.keras.Model):
kernel_regularizer
=
None
,
bias_regularizer
=
None
,
**
kwargs
):
"""
FPN i
nitializ
ation function
.
"""
I
nitializ
es a Feature Pyramid Network (FPN)
.
Args:
input_specs: `dict` input specifications. A dictionary consists of
input_specs:
A
`dict`
of
input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
min_level: `int` minimum level in FPN output feature maps.
max_level: `int` maximum level in FPN output feature maps.
num_filters: `int` number of filters in FPN layers.
use_separable_conv: `bool`
, i
f True use separable convolution for
min_level:
An
`int`
of
minimum level in FPN output feature maps.
max_level:
An
`int`
of
maximum level in FPN output feature maps.
num_filters:
An
`int` number of filters in FPN layers.
use_separable_conv:
A
`bool`
. I
f True use separable convolution for
convolution in FPN layers.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
**kwargs: keyword arguments to be passed.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A `str` name of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
self
.
_config_dict
=
{
'input_specs'
:
input_specs
,
...
...
official/vision/beta/modeling/decoders/fpn_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for FPN."""
# Import libraries
...
...
official/vision/beta/modeling/decoders/nasfpn.py
View file @
790e49e5
# Copyright 202
0
The TensorFlow Authors. All Rights Reserved.
# Copyright 202
1
The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -11,13 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NAS-FPN.
Golnaz Ghiasi, Tsung-Yi Lin, Ruoming Pang, Quoc V. Le.
NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection.
https://arxiv.org/abs/1904.07392. CVPR 2019.
"""
"""Contains definitions of NAS-FPN."""
# Import libraries
from
absl
import
logging
...
...
@@ -60,7 +55,13 @@ def build_block_specs(block_specs=None):
@
tf
.
keras
.
utils
.
register_keras_serializable
(
package
=
'Vision'
)
class
NASFPN
(
tf
.
keras
.
Model
):
"""NAS-FPN."""
"""Creates a NAS-FPN model.
This implements the paper:
Golnaz Ghiasi, Tsung-Yi Lin, Ruoming Pang, Quoc V. Le.
NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection.
(https://arxiv.org/abs/1904.07392)
"""
def
__init__
(
self
,
input_specs
,
...
...
@@ -78,29 +79,30 @@ class NASFPN(tf.keras.Model):
kernel_regularizer
=
None
,
bias_regularizer
=
None
,
**
kwargs
):
"""
FPN i
nitializ
ation function
.
"""
I
nitializ
es a NAS-FPN model
.
Args:
input_specs: `dict` input specifications. A dictionary consists of
input_specs:
A
`dict`
of
input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
min_level: `int` minimum level in FPN output feature maps.
max_level: `int` maximum level in FPN output feature maps.
min_level:
An
`int`
of
minimum level in FPN output feature maps.
max_level:
An
`int`
of
maximum level in FPN output feature maps.
block_specs: a list of BlockSpec objects that specifies the NAS-FPN
network topology. By default, the previously discovered architecture is
used.
num_filters: `int` number of filters in FPN layers.
num_filters:
An
`int` number of filters in FPN layers.
num_repeats: number of repeats for feature pyramid network.
use_separable_conv: `bool`
, i
f True use separable convolution for
use_separable_conv:
A
`bool`
. I
f True use separable convolution for
convolution in FPN layers.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
**kwargs: keyword arguments to be passed.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A `str` name of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
self
.
_config_dict
=
{
'input_specs'
:
input_specs
,
...
...
official/vision/beta/modeling/decoders/nasfpn_test.py
View file @
790e49e5
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -12,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for NAS-FPN."""
# Import libraries
...
...
official/vision/beta/modeling/factory.py
View file @
790e49e5
# Copyright 202
0
The TensorFlow Authors. All Rights Reserved.
# Copyright 202
1
The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factory methods to build models."""
# Import libraries
...
...
official/vision/beta/modeling/factory_3d.py
View file @
790e49e5
# Copyright 202
0
The TensorFlow Authors. All Rights Reserved.
# Copyright 202
1
The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
...
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factory methods to build models."""
# Import libraries
...
...
Prev
1
…
3
4
5
6
7
8
9
10
11
…
19
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment