ModelZoo / ResNet50_tensorflow / Commits

Commit 27bccda7, authored Sep 21, 2021 by Vishnu Banna

darknet and layer update

parent 3bee8403
Showing 3 changed files with 196 additions and 39 deletions.
official/vision/beta/projects/yolo/modeling/backbones/darknet.py           +19  -11
official/vision/beta/projects/yolo/modeling/layers/detection_generator.py   +1   -2
official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py           +176  -26
official/vision/beta/projects/yolo/modeling/backbones/darknet.py (+19 -11)

```diff
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # Lint as: python3
 """Contains definitions of Darknet Backbone Networks.

 The models are inspired by ResNet and CSPNet.
@@ -383,9 +384,11 @@ class Darknet(tf.keras.Model):
                max_level=5,
                width_scale=1.0,
                depth_scale=1.0,
+               use_reorg_input=False,
                csp_level_mod=(),
                activation=None,
                use_sync_bn=False,
+               use_separable_conv=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
                dilate=False,
@@ -412,11 +415,13 @@ class Darknet(tf.keras.Model):
     self._norm_momentum = norm_momentum
     self._norm_epislon = norm_epsilon
     self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv
     self._activation = activation
     self._kernel_regularizer = kernel_regularizer
     self._dilate = dilate
     self._width_scale = width_scale
     self._depth_scale = depth_scale
+    self._use_reorg_input = use_reorg_input

     self._default_dict = {
         'kernel_initializer': self._kernel_initializer,
@@ -426,6 +431,7 @@ class Darknet(tf.keras.Model):
         'norm_epsilon': self._norm_epislon,
         'use_sync_bn': self._use_sync_bn,
         'activation': self._activation,
+        'use_separable_conv': self._use_separable_conv,
         'dilation_rate': 1,
         'name': None
     }
@@ -447,6 +453,9 @@ class Darknet(tf.keras.Model):
     return self._splits

   def _build_struct(self, net, inputs):
+    if self._use_reorg_input:
+      inputs = nn_blocks.Reorg()(inputs)
+
     endpoints = collections.OrderedDict()
     stack_outputs = [inputs]
     for i, config in enumerate(net):
@@ -658,29 +667,28 @@ class Darknet(tf.keras.Model):
     }
     return layer_config


 @factory.register_backbone_builder('darknet')
 def build_darknet(
     input_specs: tf.keras.layers.InputSpec,
-    backbone_cfg: hyperparams.Config,
+    backbone_config: hyperparams.Config,
     norm_activation_config: hyperparams.Config,
     l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
   """Builds darknet."""
-  backbone_cfg = backbone_cfg.get()
+  backbone_config = backbone_config.get()
   model = Darknet(
-      model_id=backbone_cfg.model_id,
-      min_level=backbone_cfg.min_level,
-      max_level=backbone_cfg.max_level,
+      model_id=backbone_config.model_id,
+      min_level=backbone_config.min_level,
+      max_level=backbone_config.max_level,
       input_specs=input_specs,
-      dilate=backbone_cfg.dilate,
-      width_scale=backbone_cfg.width_scale,
-      depth_scale=backbone_cfg.depth_scale,
+      dilate=backbone_config.dilate,
+      width_scale=backbone_config.width_scale,
+      depth_scale=backbone_config.depth_scale,
+      use_reorg_input=backbone_config.use_reorg_input,
       activation=norm_activation_config.activation,
       use_sync_bn=norm_activation_config.use_sync_bn,
+      use_separable_conv=backbone_config.use_separable_conv,
       norm_momentum=norm_activation_config.norm_momentum,
      norm_epsilon=norm_activation_config.norm_epsilon,
       kernel_regularizer=l2_regularizer)
   model.summary()
   return model
```
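For orientation, here is a minimal sketch of constructing the backbone directly with the arguments the renamed builder now forwards. The `model_id`, pyramid levels, and input shape are illustrative assumptions, not values taken from this commit:

```python
import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.backbones.darknet import Darknet

# Hypothetical direct call mirroring what build_darknet forwards from
# backbone_config and norm_activation_config (values below are assumed).
model = Darknet(
    model_id='darknet53',
    min_level=3,
    max_level=5,
    input_specs=tf.keras.layers.InputSpec(shape=[None, 416, 416, 3]),
    use_reorg_input=False,     # new flag plumbed through in this commit
    use_separable_conv=False,  # new flag plumbed through in this commit
    norm_momentum=0.99,
    norm_epsilon=0.001)
model.summary()
```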
official/vision/beta/projects/yolo/modeling/layers/detection_generator.py (+1 -2)

```diff
@@ -280,8 +280,7 @@ class YoloLayer(tf.keras.Model):
         'num_detections': num_detections,
     }

-  @property
-  def losses(self):
+  def get_losses(self):
     """Generates a dictionary of losses to apply to each path.

     Done in the detection generator because all parameters are the same
```
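The rename likely avoids shadowing `losses`, which `tf.keras.Model` already defines as a property for tracking regularization and `add_loss` terms. Call sites would change along these lines (a sketch, not part of the diff):

```python
# Before: reading the per-path loss dict shadowed Keras' built-in property.
# loss_dict = yolo_layer.losses
# After: an explicit method call, leaving tf.keras.Model.losses intact.
loss_dict = yolo_layer.get_losses()
```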
official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py (+176 -26)

```diff
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # Lint as: python3
 """Contains common building blocks for yolo neural networks."""
+from typing import Callable, List, Tuple
+
 import tensorflow as tf
 from official.modeling import tf_utils
 from official.vision.beta.ops import spatial_transform_ops
@@ -50,6 +52,7 @@ class ConvBN(tf.keras.layers.Layer):
                bias_initializer='zeros',
                bias_regularizer=None,
                kernel_regularizer=None,
+               use_separable_conv=False,
                use_bn=True,
                use_sync_bn=False,
                norm_momentum=0.99,
@@ -96,7 +99,7 @@ class ConvBN(tf.keras.layers.Layer):
     self._padding = padding
     self._dilation_rate = dilation_rate

-    if kernel_initializer == 'VarianceScaling':
+    if kernel_initializer == "VarianceScaling":
       # to match pytorch initialization method
       self._kernel_initializer = tf.keras.initializers.VarianceScaling(
           scale=1 / 3, mode='fan_in', distribution='uniform')
@@ -110,10 +113,24 @@ class ConvBN(tf.keras.layers.Layer):
     # batch normalization params
     self._use_bn = use_bn
+    self._use_separable_conv = use_separable_conv
     self._use_sync_bn = use_sync_bn
     self._norm_momentum = norm_momentum
     self._norm_epsilon = norm_epsilon

+    ksize = self._kernel_size
+    if not isinstance(ksize, List) and not isinstance(ksize, Tuple):
+      ksize = [ksize]
+    if use_separable_conv and not all([a == 1 for a in ksize]):
+      self._conv_base = tf.keras.layers.SeparableConv2D
+    else:
+      self._conv_base = tf.keras.layers.Conv2D
+
+    if use_sync_bn:
+      self._bn_base = tf.keras.layers.experimental.SyncBatchNormalization
+    else:
+      self._bn_base = tf.keras.layers.BatchNormalization
+
     if tf.keras.backend.image_data_format() == 'channels_last':
       # format: (batch_size, height, width, channels)
       self._bn_axis = -1
```
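The net effect of the new selection logic: `SeparableConv2D` is only substituted when `use_separable_conv` is set and the kernel is larger than 1x1, since a separable 1x1 convolution would reduce to a pointwise convolution anyway. A standalone sketch of the same predicate:

```python
import tensorflow as tf

def pick_conv_base(kernel_size, use_separable_conv):
  """Mirrors ConvBN's choice of convolution class after this commit."""
  ksize = kernel_size if isinstance(kernel_size, (list, tuple)) else [kernel_size]
  if use_separable_conv and not all(a == 1 for a in ksize):
    return tf.keras.layers.SeparableConv2D
  return tf.keras.layers.Conv2D

assert pick_conv_base((1, 1), True) is tf.keras.layers.Conv2D  # 1x1 stays dense
assert pick_conv_base((3, 3), True) is tf.keras.layers.SeparableConv2D
assert pick_conv_base(3, False) is tf.keras.layers.Conv2D
```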
```diff
@@ -130,7 +147,7 @@ class ConvBN(tf.keras.layers.Layer):
   def build(self, input_shape):
     use_bias = not self._use_bn

-    self.conv = tf.keras.layers.Conv2D(
+    self.conv = self._conv_base(
         filters=self._filters,
         kernel_size=self._kernel_size,
         strides=self._strides,
@@ -143,16 +160,10 @@ class ConvBN(tf.keras.layers.Layer):
         bias_regularizer=self._bias_regularizer)

     if self._use_bn:
-      if self._use_sync_bn:
-        self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
-            momentum=self._norm_momentum,
-            epsilon=self._norm_epsilon,
-            axis=self._bn_axis)
-      else:
-        self.bn = tf.keras.layers.BatchNormalization(
-            momentum=self._norm_momentum,
-            epsilon=self._norm_epsilon,
-            axis=self._bn_axis)
+      self.bn = self._bn_base(
+          momentum=self._norm_momentum,
+          epsilon=self._norm_epsilon,
+          axis=self._bn_axis)

     if self._activation == 'leaky':
       self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
@@ -182,6 +193,7 @@ class ConvBN(tf.keras.layers.Layer):
         'kernel_regularizer': self._kernel_regularizer,
         'use_bn': self._use_bn,
         'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
         'norm_momentum': self._norm_momentum,
         'norm_epsilon': self._norm_epsilon,
         'activation': self._activation,
@@ -205,6 +217,7 @@ class DarkResidual(tf.keras.layers.Layer):
                bias_regularizer=None,
                use_bn=True,
                use_sync_bn=False,
+               use_separable_conv=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
                activation='leaky',
@@ -253,6 +266,7 @@ class DarkResidual(tf.keras.layers.Layer):
     self._bias_regularizer = bias_regularizer
     self._use_bn = use_bn
     self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv
     self._kernel_regularizer = kernel_regularizer

     # normal params
@@ -275,6 +289,7 @@ class DarkResidual(tf.keras.layers.Layer):
         'bias_regularizer': self._bias_regularizer,
         'use_bn': self._use_bn,
         'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
         'norm_momentum': self._norm_momentum,
         'norm_epsilon': self._norm_epsilon,
         'activation': self._conv_activation,
@@ -320,9 +335,7 @@ class DarkResidual(tf.keras.layers.Layer):
     elif self._sc_activation == 'mish':
       self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
     else:
-      self._activation_fn = tf_utils.get_activation(
-          self._sc_activation)
+      self._activation_fn = tf_utils.get_activation(self._sc_activation)
     super().build(input_shape)

   def call(self, inputs, training=None):
```
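The inline lambda implements the Mish activation, x * tanh(softplus(x)), special-cased here presumably because `tf_utils.get_activation` resolves the remaining names. A quick numeric sanity check:

```python
import tensorflow as tf

mish = lambda x: x * tf.math.tanh(tf.math.softplus(x))
print(float(mish(tf.constant(0.0))))  # 0.0
print(float(mish(tf.constant(1.0))))  # ~0.865
```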
```diff
@@ -378,6 +391,7 @@ class CSPTiny(tf.keras.layers.Layer):
                use_bn=True,
                dilation_rate=1,
                use_sync_bn=False,
+               use_separable_conv=False,
                group_id=1,
                groups=2,
                norm_momentum=0.99,
@@ -425,6 +439,7 @@ class CSPTiny(tf.keras.layers.Layer):
     self._use_bn = use_bn
     self._dilation_rate = dilation_rate
     self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv
     self._kernel_regularizer = kernel_regularizer
     self._groups = groups
     self._group_id = group_id
@@ -444,13 +459,14 @@ class CSPTiny(tf.keras.layers.Layer):
     dark_conv_args = {
         'kernel_initializer': self._kernel_initializer,
         'bias_initializer': self._bias_initializer,
+        'kernel_regularizer': self._kernel_regularizer,
         'bias_regularizer': self._bias_regularizer,
         'use_bn': self._use_bn,
         'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
         'norm_momentum': self._norm_momentum,
         'norm_epsilon': self._norm_epsilon,
         'activation': self._conv_activation,
-        'kernel_regularizer': self._kernel_regularizer,
         'leaky_alpha': self._leaky_alpha
     }
     self._convlayer1 = ConvBN(
@@ -465,16 +481,7 @@ class CSPTiny(tf.keras.layers.Layer):
         kernel_size=(3, 3),
         strides=(1, 1),
         padding='same',
-        kernel_initializer=self._kernel_initializer,
-        bias_initializer=self._bias_initializer,
-        bias_regularizer=self._bias_regularizer,
-        kernel_regularizer=self._kernel_regularizer,
-        use_bn=self._use_bn,
-        use_sync_bn=self._use_sync_bn,
-        norm_momentum=self._norm_momentum,
-        norm_epsilon=self._norm_epsilon,
-        activation=self._conv_activation,
-        leaky_alpha=self._leaky_alpha)
+        **dark_conv_args)
     self._convlayer3 = ConvBN(
         filters=self._filters // 2,
@@ -544,6 +551,7 @@ class CSPRoute(tf.keras.layers.Layer):
                dilation_rate=1,
                use_bn=True,
                use_sync_bn=False,
+               use_separable_conv=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
                downsample=True,
@@ -590,6 +598,7 @@ class CSPRoute(tf.keras.layers.Layer):
     self._dilation_rate = dilation_rate
     self._use_bn = use_bn
     self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv
     self._norm_momentum = norm_momentum
     self._norm_epsilon = norm_epsilon
     self._downsample = downsample
@@ -602,6 +611,7 @@ class CSPRoute(tf.keras.layers.Layer):
         'bias_regularizer': self._bias_regularizer,
         'use_bn': self._use_bn,
         'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
         'norm_momentum': self._norm_momentum,
         'norm_epsilon': self._norm_epsilon,
         'activation': self._activation,
@@ -673,6 +683,7 @@ class CSPConnect(tf.keras.layers.Layer):
                dilation_rate=1,
                use_bn=True,
                use_sync_bn=False,
+               use_separable_conv=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
                leaky_alpha=0.1,
@@ -720,6 +731,7 @@ class CSPConnect(tf.keras.layers.Layer):
     self._bias_regularizer = bias_regularizer
     self._use_bn = use_bn
     self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv
     self._norm_momentum = norm_momentum
     self._norm_epsilon = norm_epsilon
     self._drop_final = drop_final
@@ -733,6 +745,7 @@ class CSPConnect(tf.keras.layers.Layer):
         'bias_regularizer': self._bias_regularizer,
         'use_bn': self._use_bn,
         'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
         'norm_momentum': self._norm_momentum,
         'norm_epsilon': self._norm_epsilon,
         'activation': self._activation,
@@ -766,6 +779,124 @@ class CSPConnect(tf.keras.layers.Layer):
     return x


+class CSPStack(tf.keras.layers.Layer):
+  """CSP Stack layer.
+
+  CSP full stack: combines the route and the connect so you can quickly wrap
+  an existing callable or list of layers into a cross stage partial. Added
+  for ease of use. You should be able to wrap any layer stack with a CSP
+  independent of whether it belongs to the Darknet family. If
+  filter_scale = 2, then the blocks in the stack passed into the CSP stack
+  should also have filters = filters / filter_scale.
+
+  Cross Stage Partial networks (CSPNets) were proposed in:
+  [1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
+      Ping-Yang Chen, Jun-Wei Hsieh
+      CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
+      arXiv:1911.11929
+  """
+
+  def __init__(self,
+               filters,
+               model_to_wrap=None,
+               filter_scale=2,
+               activation='mish',
+               kernel_initializer='VarianceScaling',
+               bias_initializer='zeros',
+               bias_regularizer=None,
+               kernel_regularizer=None,
+               downsample=True,
+               use_bn=True,
+               use_sync_bn=False,
+               use_separable_conv=False,
+               norm_momentum=0.99,
+               norm_epsilon=0.001,
+               **kwargs):
+    """CSPStack layer initializer.
+
+    Args:
+      model_to_wrap: callable Model or a list of callable objects that will
+        process the output of CSPRoute, and be input into CSPConnect.
+        A list will be called sequentially.
+      filter_scale: integer dictating (filters // 2) or the number of filters
+        in the partial feature stack.
+      activation: string for activation function to use in layer.
+      kernel_initializer: string to indicate which function to use to
+        initialize weights.
+      bias_initializer: string to indicate which function to use to
+        initialize bias.
+      bias_regularizer: string to indicate which function to use to
+        regularize bias.
+      kernel_regularizer: string to indicate which function to use to
+        regularize weights.
+      downsample: down_sample the input.
+      use_bn: boolean for whether to use batch normalization.
+      use_sync_bn: boolean for whether to sync batch normalization statistics
+        of all batch norm layers to the model's global statistics
+        (across all input batches).
+      norm_momentum: float for momentum to use for batch normalization.
+      norm_epsilon: float for batch normalization epsilon.
+      **kwargs: Keyword Arguments.
+
+    Raises:
+      TypeError: model_to_wrap is not a layer or a list of layers.
+    """
+    super().__init__(**kwargs)
+    # layer params
+    self._filters = filters
+    self._filter_scale = filter_scale
+    self._activation = activation
+    self._downsample = downsample
+
+    # convolution params
+    self._kernel_initializer = kernel_initializer
+    self._bias_initializer = bias_initializer
+    self._kernel_regularizer = kernel_regularizer
+    self._bias_regularizer = bias_regularizer
+    self._use_bn = use_bn
+    self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv
+    self._norm_momentum = norm_momentum
+    self._norm_epsilon = norm_epsilon
+
+    if model_to_wrap is None:
+      self._model_to_wrap = []
+    elif isinstance(model_to_wrap, Callable):
+      self._model_to_wrap = [model_to_wrap]
+    elif isinstance(model_to_wrap, List):
+      self._model_to_wrap = model_to_wrap
+    else:
+      raise TypeError(
+          'the input to the CSPStack must be a list of layers that we can' +
+          'iterate through, or \n a callable')
+
+  def build(self, input_shape):
+    dark_conv_args = {
+        'filters': self._filters,
+        'filter_scale': self._filter_scale,
+        'activation': self._activation,
+        'kernel_initializer': self._kernel_initializer,
+        'bias_initializer': self._bias_initializer,
+        'bias_regularizer': self._bias_regularizer,
+        'use_bn': self._use_bn,
+        'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
+        'norm_momentum': self._norm_momentum,
+        'norm_epsilon': self._norm_epsilon,
+        'kernel_regularizer': self._kernel_regularizer,
+    }
+    self._route = CSPRoute(downsample=self._downsample, **dark_conv_args)
+    self._connect = CSPConnect(**dark_conv_args)
+
+  def call(self, inputs, training=None):
+    x, x_route = self._route(inputs)
+    for layer in self._model_to_wrap:
+      x = layer(x)
+    x = self._connect([x, x_route])
+    return x
+
+
 @tf.keras.utils.register_keras_serializable(package='yolo')
 class PathAggregationBlock(tf.keras.layers.Layer):
   """Path Aggregation block."""
```
```diff
@@ -779,6 +910,7 @@ class PathAggregationBlock(tf.keras.layers.Layer):
                kernel_regularizer=None,
                use_bn=True,
                use_sync_bn=False,
+               use_separable_conv=False,
                inverted=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
@@ -825,6 +957,7 @@ class PathAggregationBlock(tf.keras.layers.Layer):
     self._kernel_regularizer = kernel_regularizer
     self._use_bn = use_bn
     self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv

     # Normal params
     self._norm_momentum = norm_momentum
@@ -905,6 +1038,7 @@ class PathAggregationBlock(tf.keras.layers.Layer):
         'bias_regularizer': self._bias_regularizer,
         'use_bn': self._use_bn,
         'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
         'norm_momentum': self._norm_momentum,
         'norm_epsilon': self._norm_epsilon,
         'activation': self._conv_activation,
@@ -1015,6 +1149,7 @@ class SAM(tf.keras.layers.Layer):
                kernel_regularizer=None,
                use_bn=True,
                use_sync_bn=True,
+               use_separable_conv=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
                activation='sigmoid',
@@ -1038,6 +1173,7 @@ class SAM(tf.keras.layers.Layer):
         'bias_regularizer': bias_regularizer,
         'use_bn': use_bn,
         'use_sync_bn': use_sync_bn,
+        'use_separable_conv': use_separable_conv,
         'norm_momentum': norm_momentum,
         'norm_epsilon': norm_epsilon,
         'activation': activation,
@@ -1180,6 +1316,7 @@ class CBAM(tf.keras.layers.Layer):
                kernel_regularizer=None,
                use_bn=True,
                use_sync_bn=False,
+               use_separable_conv=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
                mlp_activation=None,
@@ -1196,6 +1333,7 @@ class CBAM(tf.keras.layers.Layer):
         'strides': strides,
         'padding': padding,
         'dilation_rate': dilation_rate,
+        'use_separable_conv': use_separable_conv,
     }
     self._cam_args = {
@@ -1256,6 +1394,7 @@ class DarkRouteProcess(tf.keras.layers.Layer):
                bias_regularizer=None,
                kernel_regularizer=None,
                use_sync_bn=False,
+               use_separable_conv=False,
                norm_momentum=0.99,
                norm_epsilon=0.001,
                block_invert=False,
@@ -1316,6 +1455,7 @@ class DarkRouteProcess(tf.keras.layers.Layer):
     # darkconv params
     self._filters = filters
     self._use_sync_bn = use_sync_bn
+    self._use_separable_conv = use_separable_conv
     self._kernel_initializer = kernel_initializer
     self._bias_initializer = bias_initializer
     self._bias_regularizer = bias_regularizer
@@ -1457,6 +1597,7 @@ class DarkRouteProcess(tf.keras.layers.Layer):
         'bias_initializer': self._bias_initializer,
         'bias_regularizer': self._bias_regularizer,
         'use_sync_bn': self._use_sync_bn,
+        'use_separable_conv': self._use_separable_conv,
         'norm_momentum': self._norm_momentum,
         'norm_epsilon': self._norm_epsilon,
         'kernel_regularizer': self._kernel_regularizer,
@@ -1521,3 +1662,12 @@ class DarkRouteProcess(tf.keras.layers.Layer):
       return self._call_csp(inputs, training=training)
     else:
       return self._call_regular(inputs)
+
+
+class Reorg(tf.keras.layers.Layer):
+
+  def call(self, x, training=None):
+    return tf.concat([
+        x[..., ::2, ::2, :], x[..., 1::2, ::2, :], x[..., ::2, 1::2, :],
+        x[..., 1::2, 1::2, :]
+    ], axis=-1)
\ No newline at end of file
```
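`Reorg` is a space-to-depth style rearrangement: the four 2x2-strided sub-grids of the feature map are concatenated along channels, halving height and width and quadrupling the channel count (the channel ordering differs from `tf.nn.space_to_depth`). A quick shape check of the same op, assuming an NHWC input:

```python
import tensorflow as tf

def reorg(x):
  # Identical body to Reorg.call: gather the four strided sub-grids and
  # stack them on the channel axis.
  return tf.concat([x[..., ::2, ::2, :], x[..., 1::2, ::2, :],
                    x[..., ::2, 1::2, :], x[..., 1::2, 1::2, :]], axis=-1)

x = tf.ones([1, 4, 4, 3])
assert reorg(x).shape == (1, 2, 2, 12)  # H, W halved; channels x4
```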