ModelZoo / ResNet50_tensorflow / Commits / e43d75f3

Commit e43d75f3, authored Oct 21, 2020 by anivegesana
Parent: 5122a448

    Lint YOLO backbone and building block code

Showing 14 changed files with 1117 additions and 1085 deletions.
official/vision/beta/projects/yolo/configs/backbones.py (+5, −3)
official/vision/beta/projects/yolo/modeling/backbones/Darknet.py (+267, −245)
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPConnect.py (+63, −62)
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPDownSample.py (+73, −72)
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPTiny.py (+138, −136)
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkConv.py (+136, −137)
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkResidual.py (+118, −118)
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkTiny.py (+88, −86)
official/vision/beta/projects/yolo/modeling/building_blocks/__init__.py (+0, −1)
official/vision/beta/projects/yolo/modeling/tests/test_CSPConnect.py (+43, −42)
official/vision/beta/projects/yolo/modeling/tests/test_CSPDownSample.py (+42, −41)
official/vision/beta/projects/yolo/modeling/tests/test_DarkConv.py (+57, −56)
official/vision/beta/projects/yolo/modeling/tests/test_DarkResidual.py (+49, −48)
official/vision/beta/projects/yolo/modeling/tests/test_DarkTiny.py (+38, −38)
official/vision/beta/projects/yolo/configs/backbones.py (view file @ e43d75f3)

@@ -6,11 +6,13 @@ from official.modeling import hyperparams
from official.vision.beta.configs import backbones


@dataclasses.dataclass
class DarkNet(hyperparams.Config):
  """DarkNet config."""
  model_id: str = "darknet53"


@dataclasses.dataclass
class Backbone(backbones.Backbone):
  darknet: DarkNet = DarkNet()
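A minimal sketch of reading the nested defaults back from these config dataclasses (it assumes the defaults of the base backbones.Backbone config are sufficient for construction):

  cfg = Backbone()
  print(cfg.darknet.model_id)  # "darknet53"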
official/vision/beta/projects/yolo/modeling/backbones/Darknet.py (view file @ e43d75f3)

This diff is collapsed.
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPConnect.py (view file @ e43d75f3)

@@ -5,69 +5,70 @@ from ._DarkConv import DarkConv


@ks.utils.register_keras_serializable(package='yolo')
class CSPConnect(ks.layers.Layer):

  def __init__(self,
               filters,
               filter_reduce=2,
               activation="mish",
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    super().__init__(**kwargs)

    # layer params
    self._filters = filters
    self._filter_reduce = filter_reduce
    self._activation = activation

    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon
    return

  def build(self, input_shape):
    self._conv1 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._concat = ks.layers.Concatenate(axis=-1)
    self._conv2 = DarkConv(filters=self._filters,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    return

  def call(self, inputs):
    x_prev, x_csp = inputs
    x = self._conv1(x_prev)
    x = self._concat([x, x_csp])
    x = self._conv2(x)
    return x
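As a quick orientation for the call signature above, here is a minimal shape sketch (the input shapes and eager-mode usage are assumptions for illustration; they mirror the unit tests later in this commit):

  import tensorflow as tf

  # CSPConnect expects a pair: the working route and the cross-stage partial route.
  x_prev = tf.ones((1, 112, 112, 64))
  x_csp = tf.ones((1, 112, 112, 64))

  out = CSPConnect(filters=128, filter_reduce=2)([x_prev, x_csp])
  # conv1 reduces x_prev to 128 // 2 = 64 channels, the concat restores 128,
  # and the 1x1 conv2 keeps 128, so out has shape (1, 112, 112, 128).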
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPDownSample.py (view file @ e43d75f3)

@@ -5,80 +5,81 @@ from ._DarkConv import DarkConv


@ks.utils.register_keras_serializable(package='yolo')
class CSPDownSample(ks.layers.Layer):

  def __init__(self,
               filters,
               filter_reduce=2,
               activation="mish",
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    super().__init__(**kwargs)

    # layer params
    self._filters = filters
    self._filter_reduce = filter_reduce
    self._activation = activation

    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon
    return

  def build(self, input_shape):
    self._conv1 = DarkConv(filters=self._filters,
                           kernel_size=(3, 3),
                           strides=(2, 2),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._conv2 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._conv3 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    return

  def call(self, inputs):
    x = self._conv1(inputs)
    y = self._conv2(x)
    x = self._conv3(x)
    return (x, y)
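CSPDownSample and CSPConnect are designed to be used as a pair: the first halves the spatial resolution and forks the features into two routes, the second merges the routes again. A minimal sketch under assumed input shapes, mirroring the tests below:

  import tensorflow as tf

  split = CSPDownSample(filters=64, filter_reduce=2)
  merge = CSPConnect(filters=64, filter_reduce=2)

  x_main, x_csp = split(tf.ones((1, 224, 224, 3)))  # each (1, 112, 112, 32)
  out = merge([x_main, x_csp])                      # (1, 112, 112, 64)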
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPTiny.py (view file @ e43d75f3)

@@ -3,152 +3,154 @@ import tensorflow as tf
import tensorflow.keras as ks

from ._DarkConv import DarkConv


@ks.utils.register_keras_serializable(package='yolo')
class CSPTiny(ks.layers.Layer):

  def __init__(self,
               filters=1,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               group_id=1,
               groups=2,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               downsample=True,
               leaky_alpha=0.1,
               **kwargs):
    # darkconv params
    self._filters = filters
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._weight_decay = weight_decay
    self._groups = groups
    self._group_id = group_id
    self._downsample = downsample

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    self._convlayer1 = DarkConv(filters=self._filters,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer2 = DarkConv(filters=self._filters // 2,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer3 = DarkConv(filters=self._filters // 2,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer4 = DarkConv(filters=self._filters,
                                kernel_size=(1, 1),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
                                              strides=2,
                                              padding="same",
                                              data_format=None)
    super().build(input_shape)
    return

  def call(self, inputs):
    x1 = self._convlayer1(inputs)
    x2 = tf.split(x1, self._groups, axis=-1)
    x3 = self._convlayer2(x2[self._group_id])
    x4 = self._convlayer3(x3)
    x5 = tf.concat([x4, x3], axis=-1)
    x6 = self._convlayer4(x5)
    x = tf.concat([x1, x6], axis=-1)
    if self._downsample:
      x = self._maxpool(x)
    return x, x6

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "strides": self._strides,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "weight_decay": self._weight_decay,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
    }
    layer_config.update(super().get_config())
    return layer_config
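A minimal shape sketch of the routing in call() above (the input shape is an assumption for illustration):

  import tensorflow as tf

  block = CSPTiny(filters=64, downsample=True)
  x, x6 = block(tf.ones((1, 224, 224, 3)))
  # convlayer1 gives 64 channels; tf.split yields two 32-channel groups;
  # convlayer2 and convlayer3 each keep 32; their concat (64) feeds the 1x1
  # convlayer4, so x6 is (1, 224, 224, 64); the final concat of x1 and x6
  # gives 128 channels, and the 2x2 max pool halves the spatial dims:
  # x is (1, 112, 112, 128).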
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkConv.py (view file @ e43d75f3)

@@ -11,26 +11,27 @@ from yolo.modeling.functions.mish_activation import mish


@ks.utils.register_keras_serializable(package='yolo')
class DarkConv(ks.layers.Layer):

  def __init__(self,
               filters=1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='same',
               dilation_rate=(1, 1),
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               **kwargs):
    '''
    Modified Convolution layer to match that of the DarkNet Library
    Args:
...
@@ -56,120 +57,118 @@ class DarkConv(ks.layers.Layer):
    '''
    # convolution params
    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._padding = padding
    self._dilation_rate = dilation_rate
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer

    # batchnorm params
    self._use_bn = use_bn
    if self._use_bn:
      self._use_bias = False
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    if tf.keras.backend.image_data_format() == 'channels_last':
      # format: (batch_size, height, width, channels)
      self._bn_axis = -1
    else:
      # format: (batch_size, channels, width, height)
      self._bn_axis = 1

    # activation params
    if activation is None:
      self._activation = 'linear'
    else:
      self._activation = activation
    self._leaky_alpha = leaky_alpha

    super(DarkConv, self).__init__(**kwargs)
    return

  def build(self, input_shape):
    kernel_size = self._kernel_size if type(
        self._kernel_size) == int else self._kernel_size[0]
    if self._padding == "same" and kernel_size != 1:
      self._zeropad = ks.layers.ZeroPadding2D(
          ((1, 1), (1, 1)))  # symmetric padding
    else:
      self._zeropad = Identity()

    self.conv = ks.layers.Conv2D(filters=self._filters,
                                 kernel_size=self._kernel_size,
                                 strides=self._strides,
                                 padding="valid",  # self._padding,
                                 dilation_rate=self._dilation_rate,
                                 use_bias=self._use_bias,
                                 kernel_initializer=self._kernel_initializer,
                                 bias_initializer=self._bias_initializer,
                                 kernel_regularizer=self._weight_decay,
                                 bias_regularizer=self._bias_regularizer)
    # self.conv = tf.nn.convolution(filters=self._filters, strides=self._strides, padding=self._padding

    if self._use_bn:
      if self._use_sync_bn:
        self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
            momentum=self._norm_moment,
            epsilon=self._norm_epsilon,
            axis=self._bn_axis)
      else:
        self.bn = ks.layers.BatchNormalization(momentum=self._norm_moment,
                                               epsilon=self._norm_epsilon,
                                               axis=self._bn_axis)
    else:
      self.bn = Identity()

    if self._activation == 'leaky':
      alpha = {"alpha": self._leaky_alpha}
      self._activation_fn = partial(tf.nn.leaky_relu, **alpha)
    elif self._activation == 'mish':
      self._activation_fn = mish()
    else:
      self._activation_fn = ks.layers.Activation(activation=self._activation)

    super(DarkConv, self).build(input_shape)
    return

  def call(self, inputs):
    x = self._zeropad(inputs)
    x = self.conv(x)
    x = self.bn(x)
    x = self._activation_fn(x)
    return x

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "kernel_size": self._kernel_size,
        "strides": self._strides,
        "padding": self._padding,
        "dilation_rate": self._dilation_rate,
        "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "l2_regularization": self._l2_regularization,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._activation,
        "leaky_alpha": self._leaky_alpha
    }
    layer_config.update(super(DarkConv, self).get_config())
    return layer_config

  def __repr__(self):
    return repr(self.get_config())
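Note what the build() logic above implies: a 'same' convolution with a kernel larger than 1 is realized as explicit symmetric ZeroPadding2D followed by a 'valid' Conv2D, matching the DarkNet library's padding. A minimal sketch (input shape assumed for illustration):

  import tensorflow as tf

  layer = DarkConv(filters=64, kernel_size=(3, 3), strides=(2, 2), padding="same")
  y = layer(tf.ones((1, 224, 224, 3)))
  # ZeroPadding2D((1, 1), (1, 1)) takes 224 to 226; the valid 3x3 stride-2
  # conv then yields (226 - 3) // 2 + 1 = 112 per spatial dim,
  # so y has shape (1, 112, 112, 64).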
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkResidual.py (view file @ e43d75f3)

@@ -7,24 +7,25 @@ from ._Identity import Identity


@ks.utils.register_keras_serializable(package='yolo')
class DarkResidual(ks.layers.Layer):

  def __init__(self,
               filters=1,
               filter_scale=2,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               weight_decay=None,
               bias_regularizer=None,
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               sc_activation='linear',
               downsample=False,
               **kwargs):
    '''
    DarkNet block with Residual connection for Yolo v3 Backbone
    Args:
...
@@ -46,113 +47,112 @@ **kwargs: Keyword Arguments
    '''
    # downsample
    self._downsample = downsample

    # darkconv params
    self._filters = filters
    self._filter_scale = filter_scale
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._weight_decay = weight_decay

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha
    self._sc_activation = sc_activation

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    if self._downsample:
      self._dconv = DarkConv(filters=self._filters,
                             kernel_size=(3, 3),
                             strides=(2, 2),
                             padding='same',
                             use_bias=self._use_bias,
                             kernel_initializer=self._kernel_initializer,
                             bias_initializer=self._bias_initializer,
                             bias_regularizer=self._bias_regularizer,
                             use_bn=self._use_bn,
                             use_sync_bn=self._use_sync_bn,
                             norm_momentum=self._norm_moment,
                             norm_epsilon=self._norm_epsilon,
                             activation=self._conv_activation,
                             weight_decay=self._weight_decay,
                             leaky_alpha=self._leaky_alpha)
    else:
      self._dconv = Identity()

    self._conv1 = DarkConv(filters=self._filters // self._filter_scale,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           padding='same',
                           use_bias=self._use_bias,
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._conv_activation,
                           weight_decay=self._weight_decay,
                           leaky_alpha=self._leaky_alpha)
    self._conv2 = DarkConv(filters=self._filters,
                           kernel_size=(3, 3),
                           strides=(1, 1),
                           padding='same',
                           use_bias=self._use_bias,
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._conv_activation,
                           weight_decay=self._weight_decay,
                           leaky_alpha=self._leaky_alpha)

    self._shortcut = ks.layers.Add()
    self._activation_fn = ks.layers.Activation(activation=self._sc_activation)

    super().build(input_shape)
    return

  def call(self, inputs):
    shortcut = self._dconv(inputs)
    x = self._conv1(shortcut)
    x = self._conv2(x)
    x = self._shortcut([x, shortcut])
    return self._activation_fn(x)

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "weight_decay": self._weight_decay,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
        "downsample": self._downsample
    }
    layer_config.update(super().get_config())
    return layer_config
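A minimal sketch of the residual path (shapes assumed for illustration): conv1 is a 1x1 bottleneck dividing the channels by filter_scale, conv2 is a 3x3 restoring them, and Add ties the result to the optionally downsampled shortcut:

  import tensorflow as tf

  block = DarkResidual(filters=64, downsample=True)
  y = block(tf.ones((1, 224, 224, 64)))
  # dconv: 3x3 stride-2 DarkConv -> shortcut (1, 112, 112, 64)
  # conv1: 1x1 -> 64 // 2 = 32 channels; conv2: 3x3 -> 64; the Add with the
  # shortcut leaves y at (1, 112, 112, 64).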
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkTiny.py (view file @ e43d75f3)

@@ -6,98 +6,100 @@ from ._DarkConv import DarkConv


@ks.utils.register_keras_serializable(package='yolo')
class DarkTiny(ks.layers.Layer):

  def __init__(self,
               filters=1,
               use_bias=True,
               strides=2,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               sc_activation='linear',
               **kwargs):
    # darkconv params
    self._filters = filters
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._strides = strides
    self._weight_decay = weight_decay

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha
    self._sc_activation = sc_activation

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    # if self._strides == 2:
    #   self._zeropad = ks.layers.ZeroPadding2D(((1,0), (1,0)))
    #   padding = "valid"
    # else:
    #   self._zeropad = ks.layers.ZeroPadding2D(((0,1), (0,1)))#nn_blocks.Identity()#ks.layers.ZeroPadding2D(((1,0), (1,0)))
    #   padding = "valid"
    self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
                                              strides=self._strides,
                                              padding="same",
                                              data_format=None)
    self._convlayer = DarkConv(filters=self._filters,
                               kernel_size=(3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=self._use_bias,
                               kernel_initializer=self._kernel_initializer,
                               bias_initializer=self._bias_initializer,
                               bias_regularizer=self._bias_regularizer,
                               weight_decay=self._weight_decay,
                               use_bn=self._use_bn,
                               use_sync_bn=self._use_sync_bn,
                               norm_momentum=self._norm_moment,
                               norm_epsilon=self._norm_epsilon,
                               activation=self._conv_activation,
                               leaky_alpha=self._leaky_alpha)
    super().build(input_shape)
    return

  def call(self, inputs):
    output = self._maxpool(inputs)
    output = self._convlayer(output)
    return output

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "strides": self._strides,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "l2_regularization": self._l2_regularization,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
    }
    layer_config.update(super().get_config())
    return layer_config
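DarkTiny is simply a strided max pool followed by a stride-1 3x3 DarkConv; a minimal shape sketch (input shape assumed for illustration):

  import tensorflow as tf

  layer = DarkTiny(filters=64, strides=2)
  y = layer(tf.ones((1, 224, 224, 32)))
  # MaxPool2D(pool_size=2, strides=2, padding="same") maps 224 -> 112, and
  # the 3x3 stride-1 'same' DarkConv maps 32 -> 64 channels,
  # so y has shape (1, 112, 112, 64).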
official/vision/beta/projects/yolo/modeling/building_blocks/__init__.py (view file @ e43d75f3)

@@ -4,4 +4,3 @@ from ._DarkTiny import DarkTiny
from ._CSPConnect import CSPConnect
from ._CSPDownSample import CSPDownSample
from ._CSPTiny import CSPTiny
official/vision/beta/projects/yolo/modeling/tests/test_CSPConnect.py (view file @ e43d75f3)

@@ -8,48 +8,49 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConne


class CSPConnect(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, filter_reduce=mod)
    test_layer2 = layer_companion(filters=filters, filter_reduce=mod)
    outx, px = test_layer(x)
    outx = test_layer2([outx, px])
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width // 2), np.ceil(height // 2), (filters)])
    return

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 128, 2))
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, filter_reduce=mod)
    path_layer = layer_companion(filters, filter_reduce=mod)

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, int(np.ceil(width // 2)),
                                  int(np.ceil(height // 2)), filters),
                           dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat, x_prev = test_layer(x)
      x_hat = path_layer([x_hat, x_prev])
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
official/vision/beta/projects/yolo/modeling/tests/test_CSPDownSample.py (view file @ e43d75f3)

@@ -6,48 +6,49 @@ from absl.testing import parameterized
from official.vision.beta.projects.yolo.modeling.building_blocks import CSPDownSample as layer
from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConnect as layer_companion


class CSPDownSample(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, filter_reduce=mod)
    outx, px = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width // 2), np.ceil(height // 2), (filters / mod)])
    return

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 128, 2))
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, filter_reduce=mod)
    path_layer = layer_companion(filters, filter_reduce=mod)

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, int(np.ceil(width // 2)),
                                  int(np.ceil(height // 2)), filters),
                           dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat, x_prev = test_layer(x)
      x_hat = path_layer([x_hat, x_prev])
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
official/vision/beta/projects/yolo/modeling/tests/test_DarkConv.py (view file @ e43d75f3)

@@ -5,67 +5,68 @@ from absl.testing import parameterized
from official.vision.beta.projects.yolo.modeling.building_blocks import DarkConv


class DarkConvTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("valid", (3, 3), "valid", (1, 1)),
                                  ("same", (3, 3), "same", (1, 1)),
                                  ("downsample", (3, 3), "same", (2, 2)),
                                  ("test", (1, 1), "valid", (1, 1)))
  def test_pass_through(self, kernel_size, padding, strides):
    if padding == "same":
      pad_const = 1
    else:
      pad_const = 0
    x = ks.Input(shape=(224, 224, 3))
    test_layer = DarkConv(filters=64,
                          kernel_size=kernel_size,
                          padding=padding,
                          strides=strides,
                          trainable=False)
    outx = test_layer(x)
    print(outx.shape.as_list())
    test = [
        None,
        int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),
        int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1), 64
    ]
    print(test)
    self.assertAllEqual(outx.shape.as_list(), test)
    return

  @parameterized.named_parameters(("filters", 3))
  def test_gradient_pass_though(self, filters):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    with tf.device("/CPU:0"):
      test_layer = DarkConv(filters, kernel_size=(3, 3), padding="same")

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, 224, 224, 3), dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, 224, 224, filters), dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return

  # @parameterized.named_parameters(("filters", 3), ("filters", 20), ("filters", 512))
  # def test_time(self, filters):
  #   # finish the test for time
  #   dataset = tfds.load("mnist")
  #   model = ks.Sequential([
  #       DarkConv(7, kernel_size=(3,3), strides = (2,2), activation='relu'),
  #       DarkConv(10, kernel_size=(3,3), strides = (2,2), activation='relu'),
  #       DarkConv(filters, kernel_size=(3,3), strides = (1,1), activation='relu'),
  #       DarkConv(9, kernel_size=(3,3), strides = (2,2), activation='relu'),
  #       ks.layers.GlobalAveragePooling2D(),
  #       ks.layers.Dense(10, activation='softmax')], name='test')
  #   return


if __name__ == "__main__":
  tf.test.main()
official/vision/beta/projects/yolo/modeling/tests/test_DarkResidual.py (view file @ e43d75f3)

@@ -7,54 +7,55 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import DarkResi


class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, False),
                                  ("downsample", 223, 223, 32, True),
                                  ("oddball", 223, 223, 32, False))
  def test_pass_through(self, width, height, filters, downsample):
    mod = 1
    if downsample:
      mod = 2
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, downsample=downsample)
    outx = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width / mod), np.ceil(height / mod), filters])
    return

  @parameterized.named_parameters(("same", 64, 224, 224, False),
                                  ("downsample", 32, 223, 223, True),
                                  ("oddball", 32, 223, 223, False))
  def test_gradient_pass_though(self, filters, width, height, downsample):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, downsample=downsample)
    if downsample:
      mod = 2
    else:
      mod = 1

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, int(np.ceil(width / mod)),
                                  int(np.ceil(height / mod)), filters),
                           dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
official/vision/beta/projects/yolo/modeling/tests/test_DarkTiny.py (view file @ e43d75f3)

@@ -7,44 +7,44 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import DarkTiny


class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("middle", 224, 224, 64, 2),
                                  ("last", 224, 224, 1024, 1))
  def test_pass_through(self, width, height, filters, strides):
    x = ks.Input(shape=(width, height, filters))
    test_layer = DarkTiny(filters=filters, strides=strides)
    outx = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertEqual(width % strides, 0, msg="width % strides != 0")
    self.assertEqual(height % strides, 0, msg="height % strides != 0")
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, width // strides, height // strides, filters])
    return

  @parameterized.named_parameters(("middle", 224, 224, 64, 2),
                                  ("last", 224, 224, 1024, 1))
  def test_gradient_pass_though(self, width, height, filters, strides):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = DarkTiny(filters=filters, strides=strides)

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, width // strides, height // strides,
                                  filters),
                           dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()