ModelZoo / ResNet50_tensorflow / Commits

Commit b16447b4
Authored Mar 25, 2020 by A. Unique TensorFlower

Internal changes.

PiperOrigin-RevId: 303012385
Parent: 24308bbc
Changes: 6 files, 1 addition and 161 deletions (+1, -161)

  official/vision/detection/configs/base_config.py            +0  -41
  official/vision/detection/configs/retinanet_config.py       +0  -20
  official/vision/detection/main.py                            +1  -1
  official/vision/detection/modeling/architecture/factory.py  +0  -7
  official/vision/detection/modeling/architecture/nn_ops.py   +0  -85
  official/vision/detection/modeling/architecture/resnet.py   +0  -7
official/vision/detection/configs/base_config.py

@@ -87,10 +87,6 @@ BASE_CFG = {
     },
     'resnet': {
         'resnet_depth': 50,
-        'dropblock': {
-            'dropblock_keep_prob': None,
-            'dropblock_size': None,
-        },
         'batch_norm': {
             'batch_norm_momentum': 0.997,
             'batch_norm_epsilon': 1e-4,
@@ -111,43 +107,6 @@ BASE_CFG = {
             'use_sync_bn': False,
         },
     },
-    'nasfpn': {
-        'min_level': 3,
-        'max_level': 7,
-        'fpn_feat_dims': 256,
-        'num_repeats': 5,
-        'use_separable_conv': False,
-        'dropblock': {
-            'dropblock_keep_prob': None,
-            'dropblock_size': None,
-        },
-        'batch_norm': {
-            'batch_norm_momentum': 0.997,
-            'batch_norm_epsilon': 1e-4,
-            'batch_norm_trainable': True,
-            'use_sync_bn': False,
-        },
-    },
-    # tunable_nasfpn:strip_begin
-    'tunable_nasfpn_v1': {
-        'min_level': 3,
-        'max_level': 7,
-        'fpn_feat_dims': 256,
-        'num_repeats': 5,
-        'use_separable_conv': False,
-        'dropblock': {
-            'dropblock_keep_prob': None,
-            'dropblock_size': None,
-        },
-        'batch_norm': {
-            'batch_norm_momentum': 0.997,
-            'batch_norm_epsilon': 1e-4,
-            'batch_norm_trainable': True,
-            'use_sync_bn': False,
-        },
-        'nodes': None
-    },
-    # tunable_nasfpn:strip_end
     'postprocess': {
         'use_batched_nms': False,
         'max_total_size': 100,
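For orientation, the nested dictionaries above are consumed elsewhere in the repo through attribute-style access (factory.py below reads params.resnet.resnet_depth, for example). The following is a minimal, illustrative sketch of that pattern in plain Python; types.SimpleNamespace is a stand-in wrapper, not the repo's actual params implementation, and the trimmed dict only repeats keys visible in this diff.

# Illustrative sketch only: wrap nested config dicts so keys become attributes,
# mirroring the params.resnet.resnet_depth style access used in factory.py.
from types import SimpleNamespace

def to_params(d):
  """Recursively wraps nested dicts in SimpleNamespace objects."""
  return SimpleNamespace(**{k: to_params(v) if isinstance(v, dict) else v
                            for k, v in d.items()})

params = to_params({
    'resnet': {
        'resnet_depth': 50,
        'batch_norm': {'batch_norm_momentum': 0.997, 'batch_norm_epsilon': 1e-4},
    },
})
print(params.resnet.resnet_depth)                    # 50
print(params.resnet.batch_norm.batch_norm_momentum)  # 0.997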
official/vision/detection/configs/retinanet_config.py

@@ -106,10 +106,6 @@ RETINANET_CFG = {
     },
     'resnet': {
         'resnet_depth': 50,
-        'dropblock': {
-            'dropblock_keep_prob': None,
-            'dropblock_size': None,
-        },
         'batch_norm': {
             'batch_norm_momentum': 0.997,
             'batch_norm_epsilon': 1e-4,
@@ -128,22 +124,6 @@ RETINANET_CFG = {
             'batch_norm_trainable': True,
         },
     },
-    'nasfpn': {
-        'min_level': 3,
-        'max_level': 7,
-        'fpn_feat_dims': 256,
-        'num_repeats': 5,
-        'use_separable_conv': False,
-        'dropblock': {
-            'dropblock_keep_prob': None,
-            'dropblock_size': None,
-        },
-        'batch_norm': {
-            'batch_norm_momentum': 0.997,
-            'batch_norm_epsilon': 1e-4,
-            'batch_norm_trainable': True,
-        },
-    },
     'retinanet_head': {
         'min_level': 3,
         'max_level': 7,
official/vision/detection/main.py

@@ -52,7 +52,7 @@ flags.DEFINE_string(
 flags.DEFINE_string(
     'model', default='retinanet',
-    help='Model to run: `retinanet` or `shapemask`.')
+    help='Model to run: `retinanet` or `mask_rcnn`.')
 flags.DEFINE_string('training_file_pattern', None,
                     'Location of the train data.')
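As a minimal, self-contained sketch of how absl flags like the two above are declared and consumed (an assumption for illustration, not code from this commit; only the flag names and help strings come from the diff):

# Sketch only: standalone absl flags usage mirroring the definitions above.
from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('model', default='retinanet',
                    help='Model to run: `retinanet` or `mask_rcnn`.')
flags.DEFINE_string('training_file_pattern', None,
                    'Location of the train data.')

def main(_):
  # Illustrative body: the real main.py builds and runs the detection model.
  print('model:', FLAGS.model)
  print('training_file_pattern:', FLAGS.training_file_pattern)

if __name__ == '__main__':
  # e.g. python demo.py --model=retinanet --training_file_pattern=/path/to/train*
  app.run(main)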
official/vision/detection/modeling/architecture/factory.py

@@ -37,19 +37,12 @@ def batch_norm_relu_generator(params):
   return _batch_norm_op
 
 
-def dropblock_generator(params):
-  return nn_ops.Dropblock(
-      dropblock_keep_prob=params.dropblock_keep_prob,
-      dropblock_size=params.dropblock_size)
-
-
 def backbone_generator(params):
   """Generator function for various backbone models."""
   if params.architecture.backbone == 'resnet':
     resnet_params = params.resnet
     backbone_fn = resnet.Resnet(
         resnet_depth=resnet_params.resnet_depth,
-        dropblock=dropblock_generator(resnet_params.dropblock),
         batch_norm_relu=batch_norm_relu_generator(resnet_params.batch_norm))
   else:
     raise ValueError('Backbone model %s is not supported.' %
official/vision/detection/modeling/architecture/nn_ops.py

@@ -84,88 +84,3 @@ class BatchNormRelu(tf.keras.layers.Layer):
       inputs = tf.nn.relu(inputs)
     return inputs
-
-
-class Dropblock(object):
-  """DropBlock: a regularization method for convolutional neural networks.
-
-  DropBlock is a form of structured dropout, where units in a contiguous
-  region of a feature map are dropped together. DropBlock works better than
-  dropout on convolutional layers due to the fact that activation units in
-  convolutional layers are spatially correlated.
-  See https://arxiv.org/pdf/1810.12890.pdf for details.
-  """
-
-  def __init__(self,
-               dropblock_keep_prob=None,
-               dropblock_size=None,
-               data_format='channels_last'):
-    self._dropblock_keep_prob = dropblock_keep_prob
-    self._dropblock_size = dropblock_size
-    self._data_format = data_format
-
-  def __call__(self, net, is_training=False):
-    """Builds Dropblock layer.
-
-    Args:
-      net: `Tensor` input tensor.
-      is_training: `bool` if True, the model is in training mode.
-
-    Returns:
-      A version of input tensor with DropBlock applied.
-    """
-    if not is_training or self._dropblock_keep_prob is None:
-      return net
-
-    logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(
-        self._dropblock_size, net.shape))
-
-    if self._data_format == 'channels_last':
-      _, height, width, _ = net.get_shape().as_list()
-    else:
-      _, _, height, width = net.get_shape().as_list()
-
-    total_size = width * height
-    dropblock_size = min(self._dropblock_size, min(width, height))
-    # Seed_drop_rate is the gamma parameter of DropBlcok.
-    seed_drop_rate = (
-        1.0 - self._dropblock_keep_prob) * total_size / dropblock_size**2 / (
-            (width - self._dropblock_size + 1) *
-            (height - self._dropblock_size + 1))
-
-    # Forces the block to be inside the feature map.
-    w_i, h_i = tf.meshgrid(tf.range(width), tf.range(height))
-    valid_block = tf.logical_and(
-        tf.logical_and(w_i >= int(dropblock_size // 2),
-                       w_i < width - (dropblock_size - 1) // 2),
-        tf.logical_and(h_i >= int(dropblock_size // 2),
-                       h_i < width - (dropblock_size - 1) // 2))
-
-    if self._data_format == 'channels_last':
-      valid_block = tf.reshape(valid_block, [1, height, width, 1])
-    else:
-      valid_block = tf.reshape(valid_block, [1, 1, height, width])
-
-    randnoise = tf.random.uniform(net.shape, dtype=tf.float32)
-    valid_block = tf.cast(valid_block, dtype=tf.float32)
-    seed_keep_rate = tf.cast(1 - seed_drop_rate, dtype=tf.float32)
-    block_pattern = (1 - valid_block + seed_keep_rate + randnoise) >= 1
-    block_pattern = tf.cast(block_pattern, dtype=tf.float32)
-
-    if self._data_format == 'channels_last':
-      ksize = [1, self._dropblock_size, self._dropblock_size, 1]
-    else:
-      ksize = [1, 1, self._dropblock_size, self._dropblock_size]
-    block_pattern = -tf.nn.max_pool2d(
-        -block_pattern,
-        ksize=ksize,
-        strides=[1, 1, 1, 1],
-        padding='SAME',
-        data_format='NHWC' if self._data_format == 'channels_last' else 'NCHW')
-
-    percent_ones = tf.cast(
-        tf.reduce_sum(input_tensor=block_pattern), tf.float32) / tf.cast(
-            tf.size(input=block_pattern), tf.float32)
-
-    net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
-        block_pattern, net.dtype)
-    return net
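The seed_drop_rate in the removed __call__ above is the gamma of the DropBlock paper: the per-position probability of seeding a dropped block, chosen so the expected fraction of dropped activations matches 1 - dropblock_keep_prob. A minimal sketch of the same arithmetic with plain Python numbers follows; the feature-map size and hyperparameters are illustrative assumptions, not values from this commit.

# Illustrative values only; the formula matches the removed __call__ above.
height, width = 32, 32        # spatial size of the feature map
dropblock_keep_prob = 0.9     # fraction of activations to keep
dropblock_size = 7            # edge length of each dropped square block

total_size = width * height
seed_drop_rate = ((1.0 - dropblock_keep_prob) * total_size / dropblock_size**2 /
                  ((width - dropblock_size + 1) * (height - dropblock_size + 1)))
print(round(seed_drop_rate, 4))  # 0.0031: per-position probability of seeding a dropped block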
official/vision/detection/modeling/architecture/resnet.py

@@ -34,14 +34,12 @@ class Resnet(object):
   def __init__(self,
                resnet_depth,
-               dropblock=nn_ops.Dropblock(),
                batch_norm_relu=nn_ops.BatchNormRelu,
                data_format='channels_last'):
     """ResNet initialization function.
 
     Args:
       resnet_depth: `int` depth of ResNet backbone model.
-      dropblock: a dropblock layer.
       batch_norm_relu: an operation that includes a batch normalization layer
         followed by a relu layer(optional).
       data_format: `str` either "channels_first" for `[batch, channels, height,
@@ -49,7 +47,6 @@ class Resnet(object):
     """
     self._resnet_depth = resnet_depth
-    self._dropblock = dropblock
     self._batch_norm_relu = batch_norm_relu
     self._data_format = data_format
@@ -219,24 +216,20 @@ class Resnet(object):
           inputs=inputs, filters=filters_out, kernel_size=1, strides=strides)
       shortcut = self._batch_norm_relu(relu=False)(
          shortcut, is_training=is_training)
-      shortcut = self._dropblock(shortcut, is_training=is_training)
 
     inputs = self.conv2d_fixed_padding(
         inputs=inputs, filters=filters, kernel_size=1, strides=1)
     inputs = self._batch_norm_relu()(inputs, is_training=is_training)
-    inputs = self._dropblock(inputs, is_training=is_training)
 
     inputs = self.conv2d_fixed_padding(
         inputs=inputs, filters=filters, kernel_size=3, strides=strides)
     inputs = self._batch_norm_relu()(inputs, is_training=is_training)
-    inputs = self._dropblock(inputs, is_training=is_training)
 
     inputs = self.conv2d_fixed_padding(
         inputs=inputs, filters=4 * filters, kernel_size=1, strides=1)
     inputs = self._batch_norm_relu(
         relu=False, init_zero=True)(
             inputs, is_training=is_training)
-    inputs = self._dropblock(inputs, is_training=is_training)
 
     return tf.nn.relu(inputs + shortcut)
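A minimal construction sketch (an assumption for illustration, not code from this commit) of how the backbone is instantiated after this change, i.e. without the removed dropblock argument. It assumes the official/vision/detection package from this repository is importable; only the constructor arguments shown in the diff above are used.

# Sketch only: constructor keywords mirror the __init__ signature shown above,
# minus the removed `dropblock` parameter.
from official.vision.detection.modeling.architecture import nn_ops
from official.vision.detection.modeling.architecture import resnet

backbone = resnet.Resnet(
    resnet_depth=50,
    batch_norm_relu=nn_ops.BatchNormRelu,
    data_format='channels_last')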