ModelZoo / ResNet50_tensorflow · Commits

Commit d5747aac, authored Nov 07, 2020 by vishnubanna
    ready for review
Parent: 31c3ab9e

Showing 3 changed files with 739 additions and 563 deletions:
  official/vision/beta/projects/yolo/modeling/backbones/darknet.py        +19   -18
  official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py         +640  -533
  official/vision/beta/projects/yolo/modeling/layers/nn_blocks_test.py    +80   -12
official/vision/beta/projects/yolo/modeling/backbones/darknet.py  (View file @ d5747aac)

@@ -77,11 +77,11 @@ class layer_factory(object):
  """

  def __init__(self):
    self._layer_dict = {
-       "DarkConv": (nn_blocks.DarkConv, self.darkconv_config_todict),
+       "ConvBN": (nn_blocks.ConvBN, self.ConvBN_config_todict),
        "MaxPool": (tf.keras.layers.MaxPool2D, self.maxpool_config_todict)
    }

- def darkconv_config_todict(self, config, kwargs):
+ def ConvBN_config_todict(self, config, kwargs):
    dictvals = {
        "filters": config.filters,
        "kernel_size": config.kernel_size,
@@ -124,7 +124,7 @@ CSPDARKNET53 = {
    "splits": {"backbone_split": 106, "neck_split": 138},
    "backbone": [
-       ["DarkConv", None, 1, False, 32, None, 3, 1, "same", "mish", -1, 0, False],
+       ["ConvBN", None, 1, False, 32, None, 3, 1, "same", "mish", -1, 0, False],
        ["DarkRes", "csp", 1, True, 64, None, None, None, None, "mish", -1, 1, False],
        ["DarkRes", "csp", 2, False, 128, None, None, None, None, "mish", -1, 2, False],
        ["DarkRes", "csp", 8, False, 256, None, None, None, None, "mish", -1, 3, True],
@@ -137,7 +137,7 @@ DARKNET53 = {
    "list_names": LISTNAMES,
    "splits": {"backbone_split": 76},
    "backbone": [
-       ["DarkConv", None, 1, False, 32, None, 3, 1, "same", "leaky", -1, 0, False],
+       ["ConvBN", None, 1, False, 32, None, 3, 1, "same", "leaky", -1, 0, False],
        ["DarkRes", "residual", 1, True, 64, None, None, None, None, "leaky", -1, 1, False],
        ["DarkRes", "residual", 2, False, 128, None, None, None, None, "leaky", -1, 2, False],
        ["DarkRes", "residual", 8, False, 256, None, None, None, None, "leaky", -1, 3, True],
@@ -150,12 +150,12 @@ CSPDARKNETTINY = {
    "list_names": LISTNAMES,
    "splits": {"backbone_split": 28},
    "backbone": [
-       ["DarkConv", None, 1, False, 32, None, 3, 2, "same", "leaky", -1, 0, False],
-       ["DarkConv", None, 1, False, 64, None, 3, 2, "same", "leaky", -1, 1, False],
+       ["ConvBN", None, 1, False, 32, None, 3, 2, "same", "leaky", -1, 0, False],
+       ["ConvBN", None, 1, False, 64, None, 3, 2, "same", "leaky", -1, 1, False],
        ["CSPTiny", "csp_tiny", 1, False, 64, None, 3, 2, "same", "leaky", -1, 2, False],
        ["CSPTiny", "csp_tiny", 1, False, 128, None, 3, 2, "same", "leaky", -1, 3, False],
        ["CSPTiny", "csp_tiny", 1, False, 256, None, 3, 2, "same", "leaky", -1, 4, True],
-       ["DarkConv", None, 1, False, 512, None, 3, 1, "same", "leaky", -1, 5, True],
+       ["ConvBN", None, 1, False, 512, None, 3, 1, "same", "leaky", -1, 5, True],
    ]
}
@@ -163,7 +163,7 @@ DARKNETTINY = {
    "list_names": LISTNAMES,
    "splits": {"backbone_split": 14},
    "backbone": [
-       ["DarkConv", None, 1, False, 16, None, 3, 1, "same", "leaky", -1, 0, False],
+       ["ConvBN", None, 1, False, 16, None, 3, 1, "same", "leaky", -1, 0, False],
        ["DarkTiny", "tiny", 1, True, 32, None, 3, 2, "same", "leaky", -1, 1, False],
        ["DarkTiny", "tiny", 1, True, 64, None, 3, 2, "same", "leaky", -1, 2, False],
        ["DarkTiny", "tiny", 1, False, 128, None, 3, 2, "same", "leaky", -1, 3, False],
@@ -292,27 +292,28 @@ class Darknet(ks.Model):

  def _csp_stack(self, inputs, config, name):
    if config.bottleneck:
-     csp_filter_reduce = 1
-     residual_filter_reduce = 2
+     csp_filter_scale = 1
+     residual_filter_scale = 2
+     scale_filters = 1
    else:
-     csp_filter_reduce = 2
-     residual_filter_reduce = 1
+     csp_filter_scale = 2
+     residual_filter_scale = 1
+     scale_filters = 2
    self._default_dict["activation"] = self._get_activation(config.activation)
    self._default_dict["name"] = f"{name}_csp_down"
-   x, x_route = nn_blocks.CSPDownSample(filters=config.filters,
-                                        filter_reduce=csp_filter_reduce,
-                                        **self._default_dict)(inputs)
+   x, x_route = nn_blocks.CSPRoute(filters=config.filters,
+                                   filter_scale=csp_filter_scale,
+                                   downsample=True,
+                                   **self._default_dict)(inputs)
    for i in range(config.repetitions):
      self._default_dict["name"] = f"{name}_{i}"
      x = nn_blocks.DarkResidual(filters=config.filters // scale_filters,
-                                filter_scale=residual_filter_reduce,
+                                filter_scale=residual_filter_scale,
                                 **self._default_dict)(x)
    self._default_dict["name"] = f"{name}_csp_connect"
    output = nn_blocks.CSPConnect(filters=config.filters,
-                                 filter_reduce=csp_filter_reduce,
+                                 filter_scale=csp_filter_scale,
                                  **self._default_dict)([x, x_route])
    self._default_dict["activation"] = self._activation
    self._default_dict["name"] = None
@@ -335,7 +336,7 @@ class Darknet(ks.Model):
                                         name=f"{name}_tiny/pool")(inputs)
    self._default_dict["activation"] = self._get_activation(config.activation)
    self._default_dict["name"] = f"{name}_tiny/conv"
-   x = nn_blocks.DarkConv(filters=config.filters,
+   x = nn_blocks.ConvBN(filters=config.filters,
                           kernel_size=(3, 3),
                           strides=(1, 1),
                           padding='same',
official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py  (View file @ d5747aac)

 """Contains common building blocks for yolo neural networks."""
 from functools import partial
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Text
 import tensorflow as tf
-import tensorflow.keras as ks
-import tensorflow.keras.backend as K
 from official.modeling import tf_utils


-@ks.utils.register_keras_serializable(package='yolo')
-class Identity(ks.layers.Layer):
+@tf.keras.utils.register_keras_serializable(package='yolo')
+class Identity(tf.keras.layers.Layer):

  def __init__(self, **kwargs):
    super().__init__(**kwargs)
@@ -15,9 +13,9 @@ class Identity(ks.layers.Layer):
    return input


-@ks.utils.register_keras_serializable(package='yolo')
-class DarkConv(ks.layers.Layer):
+@tf.keras.utils.register_keras_serializable(package='yolo')
+class ConvBN(tf.keras.layers.Layer):
  '''
  Modified convolution layer to match that of the DarkNet library. The layer is a standard combination of Conv, BatchNorm, and Activation;
  however, the use of bias in the conv is determined by the use of batch normalization. The layer also allows for the feature grouping
  suggested in the CSPNet paper.
@@ -51,153 +49,154 @@ class DarkConv(ks.layers.Layer):
    leaky_alpha: float to use as alpha if activation function is leaky
    **kwargs: Keyword Arguments
  '''

  def __init__(self,
               filters=1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='same',
               dilation_rate=(1, 1),
               use_bias=True,
               groups=1,
               group_id=0,
               grouping_only=False,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               kernel_regularizer=None,  # Specify the weight decay as the default will not work.
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               **kwargs):
    # convolution params
    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._padding = padding
    self._dilation_rate = dilation_rate
    self._use_bias = use_bias
    self._groups = groups
    self._group_id = group_id
    self._grouping_only = grouping_only
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer

    # batch normalization params
    self._use_bn = use_bn
    if self._use_bn:
      self._use_bias = False
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    if tf.keras.backend.image_data_format() == 'channels_last':
      # format: (batch_size, height, width, channels)
      self._bn_axis = -1
    else:
      # format: (batch_size, channels, width, height)
      self._bn_axis = 1

    # activation params
    self._activation = activation
    self._leaky_alpha = leaky_alpha

-   super(DarkConv, self).__init__(**kwargs)
+   super(ConvBN, self).__init__(**kwargs)

  def build(self, input_shape):
    if not self._grouping_only:
      kernel_size = self._kernel_size if type(self._kernel_size) == int else self._kernel_size[0]
      if self._padding == "same" and kernel_size != 1:
-       self._zeropad = ks.layers.ZeroPadding2D(((1, 1), (1, 1)))  # symmetric padding
+       self._zeropad = tf.keras.layers.ZeroPadding2D(((1, 1), (1, 1)))  # symmetric padding
      else:
        self._zeropad = Identity()
-     self.conv = ks.layers.Conv2D(filters=self._filters,
+     self.conv = tf.keras.layers.Conv2D(filters=self._filters,
                                         kernel_size=self._kernel_size,
                                         strides=self._strides,
                                         padding="valid",
                                         dilation_rate=self._dilation_rate,
                                         use_bias=self._use_bias,
                                         kernel_initializer=self._kernel_initializer,
                                         bias_initializer=self._bias_initializer,
                                         kernel_regularizer=self._kernel_regularizer,
                                         bias_regularizer=self._bias_regularizer)
    if self._use_bn:
      if self._use_sync_bn:
        self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
            momentum=self._norm_moment, epsilon=self._norm_epsilon, axis=self._bn_axis)
      else:
-       self.bn = ks.layers.BatchNormalization(
+       self.bn = tf.keras.layers.BatchNormalization(
            momentum=self._norm_moment, epsilon=self._norm_epsilon, axis=self._bn_axis)
    else:
      self.bn = Identity()

    if self._activation == 'leaky':
-     alpha = {"alpha": self._leaky_alpha}
-     self._activation_fn = partial(tf.nn.leaky_relu, **alpha)
+     self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
    elif self._activation == "mish":
      self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
    else:
      self._activation_fn = tf_utils.get_activation(self._activation)

  def call(self, x):
    if self._groups != 1:
      x = tf.split(x, self._groups, axis=-1)
      x = x[self._group_id]  # grouping
    if not self._grouping_only:
      x = self._zeropad(x)
      x = self.conv(x)
    x = self.bn(x)
    x = self._activation_fn(x)
    return x

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters, "kernel_size": self._kernel_size,
        "strides": self._strides, "padding": self._padding,
        "dilation_rate": self._dilation_rate, "use_bias": self._use_bias,
        "groups": self._groups, "group_id": self._group_id,
        "grouping_only": self._grouping_only,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "kernel_regularizer": self._kernel_regularizer,
        "use_bn": self._use_bn, "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment, "norm_epsilon": self._norm_epsilon,
        "activation": self._activation, "leaky_alpha": self._leaky_alpha
    }
-   layer_config.update(super(DarkConv, self).get_config())
+   layer_config.update(super(ConvBN, self).get_config())
    return layer_config

  def __repr__(self):
    return repr(self.get_config())
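As a quick orientation to the renamed block (an illustrative sketch, not part of the commit): the snippet below exercises ConvBN on a dummy batch, assuming the module is importable exactly as in the unit tests further down; all constructor arguments come from the code above.

    import tensorflow as tf
    from official.vision.beta.projects.yolo.modeling.layers import nn_blocks

    # Conv -> BatchNorm -> activation; the conv bias is dropped automatically when use_bn=True.
    layer = nn_blocks.ConvBN(filters=64, kernel_size=(3, 3), strides=(1, 1),
                             padding='same', activation='mish')  # mish(x) = x * tanh(softplus(x))

    x = tf.ones([1, 224, 224, 3])            # dummy NHWC batch
    y = layer(x)                             # same spatial size, 64 output channels
    print(y.shape)
    print(layer.get_config()["activation"])  # config round-trips for Keras serialization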
-@ks.utils.register_keras_serializable(package='yolo')
-class DarkResidual(ks.layers.Layer):
+@tf.keras.utils.register_keras_serializable(package='yolo')
+class DarkResidual(tf.keras.layers.Layer):
  '''
  DarkNet block with Residual connection for Yolo v3 Backbone

  Args:
@@ -221,127 +220,128 @@ class DarkResidual(ks.layers.Layer):
    **kwargs: Keyword Arguments
  '''

  def __init__(self,
               filters=1,
               filter_scale=2,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               sc_activation='linear',
               downsample=False,
               **kwargs):
    # downsample
    self._downsample = downsample

-   # darkconv params
+   # ConvBN params
    self._filters = filters
    self._filter_scale = filter_scale
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._kernel_regularizer = kernel_regularizer

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha
    self._sc_activation = sc_activation

    super().__init__(**kwargs)

  def build(self, input_shape):
    _dark_conv_args = {
        "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_momentum": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "kernel_regularizer": self._kernel_regularizer,
        "leaky_alpha": self._leaky_alpha
    }
    if self._downsample:
-     self._dconv = DarkConv(filters=self._filters, kernel_size=(3, 3),
+     self._dconv = ConvBN(filters=self._filters, kernel_size=(3, 3),
                             strides=(2, 2), padding='same', **_dark_conv_args)
    else:
      self._dconv = Identity()
-   self._conv1 = DarkConv(filters=self._filters // self._filter_scale, kernel_size=(1, 1),
+   self._conv1 = ConvBN(filters=self._filters // self._filter_scale, kernel_size=(1, 1),
                           strides=(1, 1), padding='same', **_dark_conv_args)
-   self._conv2 = DarkConv(filters=self._filters, kernel_size=(3, 3),
+   self._conv2 = ConvBN(filters=self._filters, kernel_size=(3, 3),
                           strides=(1, 1), padding='same', **_dark_conv_args)
-   self._shortcut = ks.layers.Add()
+   self._shortcut = tf.keras.layers.Add()
-   # self._activation_fn = ks.layers.Activation(activation=self._sc_activation)
    if self._sc_activation == 'leaky':
-     alpha = {"alpha": self._leaky_alpha}
-     self._activation_fn = partial(tf.nn.leaky_relu, **alpha)
+     self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
    elif self._sc_activation == "mish":
      self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
    else:
      self._activation_fn = tf_utils.get_activation(self._sc_activation)
    super().build(input_shape)

  def call(self, inputs):
    shortcut = self._dconv(inputs)
    x = self._conv1(shortcut)
    x = self._conv2(x)
    x = self._shortcut([x, shortcut])
    return self._activation_fn(x)

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters, "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "kernel_regularizer": self._kernel_regularizer,
        "use_bn": self._use_bn, "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment, "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation, "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation, "downsample": self._downsample
    }
    layer_config.update(super().get_config())
    return layer_config
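For orientation (an illustrative sketch, not part of the commit): a DarkResidual block applied to a dummy feature map, using only the constructor arguments shown above; the import path matches the unit tests.

    import tensorflow as tf
    from official.vision.beta.projects.yolo.modeling.layers import nn_blocks

    # Residual block: 1x1 bottleneck at filters // filter_scale, then a 3x3 ConvBN, then an Add shortcut.
    block = nn_blocks.DarkResidual(filters=64, filter_scale=2, downsample=True)

    x = tf.ones([1, 224, 224, 64])
    y = block(x)        # downsample=True routes the input through a strided 3x3 ConvBN first
    print(y.shape)      # spatial dims are halved when downsample=True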
-@ks.utils.register_keras_serializable(package='yolo')
-class CSPTiny(ks.layers.Layer):
+@tf.keras.utils.register_keras_serializable(package='yolo')
+class CSPTiny(tf.keras.layers.Layer):
  """
  A small-size convolution block proposed in the CSPNet. The layer uses shortcuts, routing (concatenation), and feature grouping
  in order to improve gradient variability and allow for high-efficiency, low-power residual learning for small networks.

  Cross Stage Partial networks (CSPNets) were proposed in:
  [1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu, Ping-Yang Chen, Jun-Wei Hsieh
@@ -369,142 +369,142 @@ class CSPTiny(ks.layers.Layer):
    so the dimensions are forced to match
    **kwargs: Keyword Arguments
  """

  def __init__(self,
               filters=1,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               kernel_regularizer=None,
               use_bn=True,
               use_sync_bn=False,
               group_id=1,
               groups=2,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               downsample=True,
               leaky_alpha=0.1,
               **kwargs):
-   # darkconv params
+   # ConvBN params
    self._filters = filters
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._kernel_regularizer = kernel_regularizer
    self._groups = groups
    self._group_id = group_id
    self._downsample = downsample

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha

    super().__init__(**kwargs)

  def build(self, input_shape):
    _dark_conv_args = {
        "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_momentum": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "kernel_regularizer": self._kernel_regularizer,
        "leaky_alpha": self._leaky_alpha
    }
-   self._convlayer1 = DarkConv(filters=self._filters, kernel_size=(3, 3),
+   self._convlayer1 = ConvBN(filters=self._filters, kernel_size=(3, 3),
                                strides=(1, 1), padding='same', **_dark_conv_args)
-   self._convlayer2 = DarkConv(filters=self._filters // 2,
+   self._convlayer2 = ConvBN(filters=self._filters // 2,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                groups=self._groups,
                                group_id=self._group_id,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                kernel_regularizer=self._kernel_regularizer,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
-   self._convlayer3 = DarkConv(filters=self._filters // 2, kernel_size=(3, 3),
+   self._convlayer3 = ConvBN(filters=self._filters // 2, kernel_size=(3, 3),
                                strides=(1, 1), padding='same', **_dark_conv_args)
-   self._convlayer4 = DarkConv(filters=self._filters, kernel_size=(1, 1),
+   self._convlayer4 = ConvBN(filters=self._filters, kernel_size=(1, 1),
                                strides=(1, 1), padding='same', **_dark_conv_args)
    self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2, strides=2,
                                              padding="same", data_format=None)
    super().build(input_shape)

  def call(self, inputs):
    x1 = self._convlayer1(inputs)
    x2 = self._convlayer2(x1)  # grouping
    x3 = self._convlayer3(x2)
    x4 = tf.concat([x3, x2], axis=-1)  # csp partial using grouping
    x5 = self._convlayer4(x4)
    x = tf.concat([x1, x5], axis=-1)  # csp connect
    if self._downsample:
      x = self._maxpool(x)
    return x, x5

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters, "use_bias": self._use_bias,
        "strides": self._strides,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "kernel_regularizer": self._kernel_regularizer,
        "use_bn": self._use_bn, "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment, "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation, "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
    }
    layer_config.update(super().get_config())
    return layer_config
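For orientation (an illustrative sketch, not part of the commit): CSPTiny's call() returns a tuple, as the code above shows; the dummy run below assumes the same import path as the unit tests.

    import tensorflow as tf
    from official.vision.beta.projects.yolo.modeling.layers import nn_blocks

    block = nn_blocks.CSPTiny(filters=64, downsample=True)

    x = tf.ones([1, 224, 224, 64])
    x_out, x_partial = block(x)  # merged path and the partial feature stack
    print(x_out.shape)           # channels double (concat of the two paths); spatial dims halve via max-pool
    print(x_partial.shape)       # the partial keeps the pre-pool resolution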
-@ks.utils.register_keras_serializable(package='yolo')
-class CSPDownSample(ks.layers.Layer):
+@tf.keras.utils.register_keras_serializable(package='yolo')
+class CSPRoute(tf.keras.layers.Layer):
  """
  Down-sampling layer to take the place of the down sampling done in Residual networks. This is
  the first of 2 layers needed to convert any Residual Network model to a CSPNet. At the start of a new
- level change, this CSPDownSample layer creates a learned identity that will act as a cross stage connection,
+ level change, this CSPRoute layer creates a learned identity that will act as a cross stage connection,
  that is used to inform the inputs to the next stage. It is called cross stage partial because the number of filters
  required in every intermittent Residual layer is reduced by half. The sister layer will take the partial generated by
  this layer and concatenate it with the output of the final residual layer in the stack to create a fully feature level
@@ -518,7 +518,8 @@ class CSPDownSample(ks.layers.Layer):
  Args:
    filters: integer for output depth, or the number of features to learn
-   filter_reduce: integer dictating (filters//2), or the number of filters in the partial feature stack
+   filter_scale: integer dictating (filters//2), or the number of filters in the partial feature stack
+   downsample: down-sample the input
    activation: string for activation function to use in layer
    kernel_initializer: string to indicate which function to use to initialize weights
    bias_initializer: string to indicate which function to use to initialize bias
@@ -531,73 +532,81 @@ class CSPDownSample(ks.layers.Layer):
    norm_epsilon: float for batch normalization epsilon
    **kwargs: Keyword Arguments
  """

  def __init__(self,
               filters,
-              filter_reduce=2,
+              filter_scale=2,
               activation="mish",
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               kernel_regularizer=None,
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
+              downsample=True,
               **kwargs):
    super().__init__(**kwargs)
    # layer params
    self._filters = filters
-   self._filter_reduce = filter_reduce
+   self._filter_scale = filter_scale
    self._activation = activation
    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon
+   self._downsample = downsample

  def build(self, input_shape):
    _dark_conv_args = {
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_momentum": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._activation,
        "kernel_regularizer": self._kernel_regularizer,
    }
-   self._conv1 = DarkConv(filters=self._filters, kernel_size=(3, 3),
-                          strides=(2, 2), **_dark_conv_args)
-   self._conv2 = DarkConv(filters=self._filters // self._filter_reduce,
-                          kernel_size=(1, 1), strides=(1, 1), **_dark_conv_args)
-   self._conv3 = DarkConv(filters=self._filters // self._filter_reduce,
-                          kernel_size=(1, 1), strides=(1, 1), **_dark_conv_args)
+   if self._downsample:
+     self._conv1 = ConvBN(filters=self._filters, kernel_size=(3, 3),
+                          strides=(2, 2), **_dark_conv_args)
+   else:
+     self._conv1 = ConvBN(filters=self._filters, kernel_size=(3, 3),
+                          strides=(1, 1), **_dark_conv_args)
+   self._conv2 = ConvBN(filters=self._filters // self._filter_scale,
+                        kernel_size=(1, 1), strides=(1, 1), **_dark_conv_args)
+   self._conv3 = ConvBN(filters=self._filters // self._filter_scale,
+                        kernel_size=(1, 1), strides=(1, 1), **_dark_conv_args)

  def call(self, inputs):
    x = self._conv1(inputs)
    y = self._conv2(x)
    x = self._conv3(x)
    return (x, y)


-@ks.utils.register_keras_serializable(package='yolo')
-class CSPConnect(ks.layers.Layer):
+@tf.keras.utils.register_keras_serializable(package='yolo')
+class CSPConnect(tf.keras.layers.Layer):
  """
-  Sister Layer to the CSPDownsample layer. Merges the partial feature stacks generated by the CSPDownsampling layer,
+  Sister Layer to the CSPRoute layer. Merges the partial feature stacks generated by the CSPDownsampling layer,
  and the final output of the residual stack. Suggested in the CSPNet paper.

  Cross Stage Partial networks (CSPNets) were proposed in:
@@ -606,7 +615,7 @@ class CSPConnect(ks.layers.Layer):
  Args:
    filters: integer for output depth, or the number of features to learn
-   filter_reduce: integer dictating (filters//2), or the number of filters in the partial feature stack
+   filter_scale: integer dictating (filters//2), or the number of filters in the partial feature stack
    activation: string for activation function to use in layer
    kernel_initializer: string to indicate which function to use to initialize weights
    bias_initializer: string to indicate which function to use to initialize bias
@@ -619,62 +628,160 @@ class CSPConnect(ks.layers.Layer):
    norm_epsilon: float for batch normalization epsilon
    **kwargs: Keyword Arguments
  """

  def __init__(self,
               filters,
-              filter_reduce=2,
+              filter_scale=2,
               activation="mish",
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               kernel_regularizer=None,
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    super().__init__(**kwargs)
    # layer params
    self._filters = filters
-   self._filter_reduce = filter_reduce
+   self._filter_scale = filter_scale
    self._activation = activation
    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

  def build(self, input_shape):
    _dark_conv_args = {
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_momentum": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._activation,
        "kernel_regularizer": self._kernel_regularizer,
    }
-   self._conv1 = DarkConv(filters=self._filters // self._filter_reduce,
+   self._conv1 = ConvBN(filters=self._filters // self._filter_scale,
                           kernel_size=(1, 1), strides=(1, 1), **_dark_conv_args)
-   self._concat = ks.layers.Concatenate(axis=-1)
+   self._concat = tf.keras.layers.Concatenate(axis=-1)
-   self._conv2 = DarkConv(filters=self._filters,
+   self._conv2 = ConvBN(filters=self._filters,
                           kernel_size=(1, 1), strides=(1, 1), **_dark_conv_args)

  def call(self, inputs):
    x_prev, x_csp = inputs
    x = self._conv1(x_prev)
    x = self._concat([x, x_csp])
    x = self._conv2(x)
    return x
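For orientation (an illustrative sketch, not part of the commit): the renamed CSPRoute/CSPConnect pair can wrap a small residual stack the same way the `_csp_stack` change in darknet.py above does; the filter choices here are only examples.

    import tensorflow as tf
    from official.vision.beta.projects.yolo.modeling.layers import nn_blocks

    filters, filter_scale = 128, 2
    route = nn_blocks.CSPRoute(filters=filters, filter_scale=filter_scale, downsample=True)
    connect = nn_blocks.CSPConnect(filters=filters, filter_scale=filter_scale)

    inputs = tf.ones([1, 64, 64, 64])
    x, x_route = route(inputs)                 # main path + cross-stage partial
    for _ in range(2):                         # intermediate residuals run at filters // filter_scale
      x = nn_blocks.DarkResidual(filters=filters // filter_scale,
                                 filter_scale=filter_scale)(x)
    out = connect([x, x_route])                # concat the partial back in, fuse with a 1x1 ConvBN
    print(out.shape)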
class CSPStack(tf.keras.layers.Layer):
  """
  CSP full stack; combines the route and the connect in case you don't want to just quickly wrap an existing callable or list of layers to
  make it a cross stage partial. Added for ease of use. You should be able to wrap any layer stack with a CSP independent of whether it belongs
  to the Darknet family. If filter_scale = 2, then the blocks in the stack passed into the CSP stack should also have filters = filters/filter_scale.

  Cross Stage Partial networks (CSPNets) were proposed in:
  [1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu, Ping-Yang Chen, Jun-Wei Hsieh
      CSPNet: A New Backbone that can Enhance Learning Capability of CNN. arXiv:1911.11929

  Args:
    model_to_wrap: callable Model or a list of callable objects that will process the output of CSPRoute, and be input into CSPConnect.
      A list will be called sequentially.
    downsample: down-sample the input
    filters: integer for output depth, or the number of features to learn
    filter_scale: integer dictating (filters//2), or the number of filters in the partial feature stack
    activation: string for activation function to use in layer
    kernel_initializer: string to indicate which function to use to initialize weights
    bias_initializer: string to indicate which function to use to initialize bias
    kernel_regularizer: string to indicate which function to use to regularize weights
    bias_regularizer: string to indicate which function to use to regularize bias
    use_bn: boolean for whether to use batch normalization
    use_sync_bn: boolean for whether to sync batch normalization statistics
      of all batch norm layers to the model's global statistics (across all input batches)
    norm_moment: float for moment to use for batch normalization
    norm_epsilon: float for batch normalization epsilon
    **kwargs: Keyword Arguments
  """

  def __init__(self,
               filters,
               model_to_wrap=None,
               filter_scale=2,
               activation="mish",
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               kernel_regularizer=None,
               downsample=True,
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    super().__init__(**kwargs)
    # layer params
    self._filters = filters
    self._filter_scale = filter_scale
    self._activation = activation
    self._downsample = downsample
    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    if model_to_wrap != None:
      if isinstance(model_to_wrap, Callable):
        self._model_to_wrap = [model_to_wrap]
      elif isinstance(model_to_wrap, List):
        self._model_to_wrap = model_to_wrap
      else:
        raise Exception(
            "the input to the CSPStack must be a list of layers that we can iterate through, or \n a callable")
    else:
      self._model_to_wrap = []

  def build(self, input_shape):
    _dark_conv_args = {
        "filters": self._filters,
        "filter_scale": self._filter_scale,
        "activation": self._activation,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_momentum": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "kernel_regularizer": self._kernel_regularizer,
    }
    self._route = CSPRoute(downsample=self._downsample, **_dark_conv_args)
    self._connect = CSPConnect(**_dark_conv_args)
    return

  def call(self, inputs):
    x, x_route = self._route(inputs)
    for layer in self._model_to_wrap:
      x = layer(x)
    x = self._connect([x, x_route])
    return x
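For orientation (an illustrative sketch, not part of the commit): the new CSPStack wrapper can turn a plain residual stack into a cross-stage-partial stage, following the docstring's note that the wrapped blocks should use filters // filter_scale; the expected output shape matches the new unit test below.

    import tensorflow as tf
    from official.vision.beta.projects.yolo.modeling.layers import nn_blocks

    filters, filter_scale = 64, 2
    inner = [nn_blocks.DarkResidual(filters=filters // filter_scale, filter_scale=filter_scale)
             for _ in range(2)]

    stack = nn_blocks.CSPStack(filters=filters, filter_scale=filter_scale,
                               downsample=True, model_to_wrap=inner)

    x = tf.ones([1, 224, 224, 64])
    y = stack(x)       # CSPRoute -> wrapped residual blocks -> CSPConnect
    print(y.shape)     # (1, 112, 112, 64) when downsample=True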
official/vision/beta/projects/yolo/modeling/layers/nn_blocks_test.py  (View file @ d5747aac)

@@ -7,14 +7,14 @@ from official.vision.beta.projects.yolo.modeling.layers import nn_blocks

-class CSPConnect(tf.test.TestCase, parameterized.TestCase):
+class CSPConnectTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
-   test_layer = nn_blocks.CSPDownSample(filters=filters, filter_reduce=mod)
-   test_layer2 = nn_blocks.CSPConnect(filters=filters, filter_reduce=mod)
+   test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)
+   test_layer2 = nn_blocks.CSPConnect(filters=filters, filter_scale=mod)
    outx, px = test_layer(x)
    outx = test_layer2([outx, px])
    print(outx)
@@ -29,8 +29,8 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
-   test_layer = nn_blocks.CSPDownSample(filters, filter_reduce=mod)
-   path_layer = nn_blocks.CSPConnect(filters, filter_reduce=mod)
+   test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)
+   path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)
    init = tf.random_normal_initializer()
    x = tf.Variable(
@@ -49,13 +49,13 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
    self.assertNotIn(None, grad)


-class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
+class CSPRouteTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
-   test_layer = nn_blocks.CSPDownSample(filters=filters, filter_reduce=mod)
+   test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)
    outx, px = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
@@ -69,8 +69,8 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
-   test_layer = nn_blocks.CSPDownSample(filters, filter_reduce=mod)
-   path_layer = nn_blocks.CSPConnect(filters, filter_reduce=mod)
+   test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)
+   path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)
    init = tf.random_normal_initializer()
    x = tf.Variable(
@@ -89,7 +89,75 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
    self.assertNotIn(None, grad)


-class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
+class CSPStackTest(tf.test.TestCase, parameterized.TestCase):

  def build_layer(self, layer_type, filters, filter_scale, count, stack_type, downsample):
    if stack_type != None:
      layers = []
      if layer_type == "residual":
        for _ in range(count):
          layers.append(nn_blocks.DarkResidual(filters=filters // filter_scale,
                                               filter_scale=filter_scale))
      else:
        for _ in range(count):
          layers.append(nn_blocks.ConvBN(filters=filters))
      if stack_type == "model":
        layers = tf.keras.Sequential(layers=layers)
    else:
      layers = None
    stack = nn_blocks.CSPStack(filters=filters,
                               filter_scale=filter_scale,
                               downsample=downsample,
                               model_to_wrap=layers)
    return stack

  @parameterized.named_parameters(
      ("no_stack", 224, 224, 64, 2, "residual", None, 0, True),
      ("residual_stack", 224, 224, 64, 2, "residual", "list", 2, True),
      ("conv_stack", 224, 224, 64, 2, "conv", "list", 3, False),
      ("callable_no_scale", 224, 224, 64, 1, "residual", "model", 5, False))
  def test_pass_through(self, width, height, filters, mod, layer_type, stack_type, count, downsample):
    x = ks.Input(shape=(width, height, filters))
    test_layer = self.build_layer(layer_type, filters, mod, count, stack_type, downsample)
    outx = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    if downsample:
      self.assertAllEqual(outx.shape.as_list(), [None, width // 2, height // 2, filters])
    else:
      self.assertAllEqual(outx.shape.as_list(), [None, width, height, filters])

  @parameterized.named_parameters(
      ("no_stack", 224, 224, 64, 2, "residual", None, 0, True),
      ("residual_stack", 224, 224, 64, 2, "residual", "list", 2, True),
      ("conv_stack", 224, 224, 64, 2, "conv", "list", 3, False),
      ("callable_no_scale", 224, 224, 64, 1, "residual", "model", 5, False))
  def test_gradient_pass_though(self, width, height, filters, mod, layer_type, stack_type, count, downsample):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
    if not downsample:
      y = tf.Variable(initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
    else:
      y = tf.Variable(initial_value=init(shape=(1, width // 2, height // 2, filters), dtype=tf.float32))
    test_layer = self.build_layer(layer_type, filters, mod, count, stack_type, downsample)
    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
    self.assertNotIn(None, grad)


+class ConvBNTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(
      ("valid", (3, 3), "valid", (1, 1)),
      ("same", (3, 3), "same", (1, 1)),
@@ -100,7 +168,7 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
    else:
      pad_const = 0
    x = ks.Input(shape=(224, 224, 3))
-   test_layer = nn_blocks.DarkConv(filters=64,
+   test_layer = nn_blocks.ConvBN(filters=64,
                                    kernel_size=kernel_size,
                                    padding=padding,
                                    strides=strides,
@@ -120,7 +188,7 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    with tf.device("/CPU:0"):
-     test_layer = nn_blocks.DarkConv(filters, kernel_size=(3, 3), padding="same")
+     test_layer = nn_blocks.ConvBN(filters, kernel_size=(3, 3), padding="same")
    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, 224, 224,