ModelZoo / ResNet50_tensorflow / Commits

Commit e43d75f3, authored Oct 21, 2020 by anivegesana

    Lint YOLO backbone and building block code

parent 5122a448
Changes (14)

Showing 14 changed files with 1117 additions and 1085 deletions.
  +5    -3    official/vision/beta/projects/yolo/configs/backbones.py
  +267  -245  official/vision/beta/projects/yolo/modeling/backbones/Darknet.py
  +63   -62   official/vision/beta/projects/yolo/modeling/building_blocks/_CSPConnect.py
  +73   -72   official/vision/beta/projects/yolo/modeling/building_blocks/_CSPDownSample.py
  +138  -136  official/vision/beta/projects/yolo/modeling/building_blocks/_CSPTiny.py
  +136  -137  official/vision/beta/projects/yolo/modeling/building_blocks/_DarkConv.py
  +118  -118  official/vision/beta/projects/yolo/modeling/building_blocks/_DarkResidual.py
  +88   -86   official/vision/beta/projects/yolo/modeling/building_blocks/_DarkTiny.py
  +0    -1    official/vision/beta/projects/yolo/modeling/building_blocks/__init__.py
  +43   -42   official/vision/beta/projects/yolo/modeling/tests/test_CSPConnect.py
  +42   -41   official/vision/beta/projects/yolo/modeling/tests/test_CSPDownSample.py
  +57   -56   official/vision/beta/projects/yolo/modeling/tests/test_DarkConv.py
  +49   -48   official/vision/beta/projects/yolo/modeling/tests/test_DarkResidual.py
  +38   -38   official/vision/beta/projects/yolo/modeling/tests/test_DarkTiny.py

official/vision/beta/projects/yolo/configs/backbones.py

@@ -6,11 +6,13 @@ from official.modeling import hyperparams

from official.vision.beta.configs import backbones


@dataclasses.dataclass
class DarkNet(hyperparams.Config):
  """DarkNet config."""
  model_id: str = "darknet53"


@dataclasses.dataclass
class Backbone(backbones.Backbone):
  darknet: DarkNet = DarkNet()
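
These two dataclasses only declare configuration. As a minimal usage sketch (hypothetical; it assumes hyperparams.Config instances can be built with their defaults and read by attribute access, as plain dataclasses can):

# Hypothetical sketch: build the default backbone config and read the model id.
cfg = Backbone()
print(cfg.darknet.model_id)  # "darknet53"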

official/vision/beta/projects/yolo/modeling/backbones/Darknet.py

(This diff is collapsed; its contents are not shown.)

official/vision/beta/projects/yolo/modeling/building_blocks/_CSPConnect.py

@@ -5,69 +5,70 @@ from ._DarkConv import DarkConv

@ks.utils.register_keras_serializable(package='yolo')
class CSPConnect(ks.layers.Layer):
  def __init__(
      self,
      filters,
      filter_reduce=2,
      activation="mish",
      kernel_initializer='glorot_uniform',
      bias_initializer='zeros',
      bias_regularizer=None,
      weight_decay=None,  # default; find where it is stated
      use_bn=True,
      use_sync_bn=False,
      norm_momentum=0.99,
      norm_epsilon=0.001,
      **kwargs):
    super().__init__(**kwargs)

    # layer params
    self._filters = filters
    self._filter_reduce = filter_reduce
    self._activation = activation

    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon
    return

  def build(self, input_shape):
    self._conv1 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._concat = ks.layers.Concatenate(axis=-1)
    self._conv2 = DarkConv(filters=self._filters,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    return

  def call(self, inputs):
    x_prev, x_csp = inputs
    x = self._conv1(x_prev)
    x = self._concat([x, x_csp])
    x = self._conv2(x)
    return x
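
CSPConnect takes a pair of tensors rather than a single input: the working path and the cross-stage partial path. A minimal sketch of the call convention, assuming the block is importable as in the package __init__.py further down:

import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConnect

# Hypothetical feature maps: x_prev from the residual stack, x_csp from the
# partial path split off by CSPDownSample.
x_prev = tf.ones((1, 112, 112, 32))
x_csp = tf.ones((1, 112, 112, 32))
out = CSPConnect(filters=64, filter_reduce=2)([x_prev, x_csp])
print(out.shape)  # (1, 112, 112, 64): 1x1 conv, concat, then 1x1 conv back to filters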

official/vision/beta/projects/yolo/modeling/building_blocks/_CSPDownSample.py

@@ -5,80 +5,81 @@ from ._DarkConv import DarkConv

@ks.utils.register_keras_serializable(package='yolo')
class CSPDownSample(ks.layers.Layer):
  def __init__(
      self,
      filters,
      filter_reduce=2,
      activation="mish",
      kernel_initializer='glorot_uniform',
      bias_initializer='zeros',
      bias_regularizer=None,
      weight_decay=None,  # default; find where it is stated
      use_bn=True,
      use_sync_bn=False,
      norm_momentum=0.99,
      norm_epsilon=0.001,
      **kwargs):
    super().__init__(**kwargs)

    # layer params
    self._filters = filters
    self._filter_reduce = filter_reduce
    self._activation = activation

    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon
    return

  def build(self, input_shape):
    self._conv1 = DarkConv(filters=self._filters,
                           kernel_size=(3, 3),
                           strides=(2, 2),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._conv2 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._conv3 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    return

  def call(self, inputs):
    x = self._conv1(inputs)
    y = self._conv2(x)
    x = self._conv3(x)
    return (x, y)
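
CSPDownSample and CSPConnect work as a pair: the first downsamples and splits off a partial feature map, the second merges it back. A rough sketch of the pattern, mirroring the gradient tests near the end of this commit (the residual blocks that would normally sit between the two calls are omitted):

import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.building_blocks import (
    CSPDownSample, CSPConnect)

inputs = tf.ones((1, 224, 224, 64))
x, x_csp = CSPDownSample(filters=64, filter_reduce=2)(inputs)  # both halves at 112x112
out = CSPConnect(filters=64, filter_reduce=2)([x, x_csp])      # merged: (1, 112, 112, 64)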

official/vision/beta/projects/yolo/modeling/building_blocks/_CSPTiny.py

@@ -3,152 +3,154 @@ import tensorflow as tf

import tensorflow.keras as ks
from ._DarkConv import DarkConv


@ks.utils.register_keras_serializable(package='yolo')
class CSPTiny(ks.layers.Layer):
  def __init__(
      self,
      filters=1,
      use_bias=True,
      kernel_initializer='glorot_uniform',
      bias_initializer='zeros',
      bias_regularizer=None,
      weight_decay=None,  # default; find where it is stated
      use_bn=True,
      use_sync_bn=False,
      group_id=1,
      groups=2,
      norm_momentum=0.99,
      norm_epsilon=0.001,
      activation='leaky',
      downsample=True,
      leaky_alpha=0.1,
      **kwargs):
    # darkconv params
    self._filters = filters
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._weight_decay = weight_decay
    self._groups = groups
    self._group_id = group_id
    self._downsample = downsample

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    self._convlayer1 = DarkConv(filters=self._filters,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer2 = DarkConv(filters=self._filters // 2,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer3 = DarkConv(filters=self._filters // 2,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer4 = DarkConv(filters=self._filters,
                                kernel_size=(1, 1),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
                                              strides=2,
                                              padding="same",
                                              data_format=None)
    super().build(input_shape)
    return

  def call(self, inputs):
    x1 = self._convlayer1(inputs)
    x2 = tf.split(x1, self._groups, axis=-1)
    x3 = self._convlayer2(x2[self._group_id])
    x4 = self._convlayer3(x3)
    x5 = tf.concat([x4, x3], axis=-1)
    x6 = self._convlayer4(x5)
    x = tf.concat([x1, x6], axis=-1)
    if self._downsample:
      x = self._maxpool(x)
    return x, x6

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    # note: self._strides and self._sc_activation are referenced here
    # but never set in __init__
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "strides": self._strides,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "weight_decay": self._weight_decay,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
    }
    layer_config.update(super().get_config())
    return layer_config
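
CSPTiny packs a whole tiny-CSP stage into one layer: a 3x3 conv, a channel split, two convs on one group, two concats, and an optional max-pool downsample; it returns both the (possibly pooled) output and the inner x6 branch. A short hedged sketch:

import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.building_blocks import CSPTiny

block = CSPTiny(filters=64, groups=2, group_id=1, downsample=True)
x, x6 = block(tf.ones((1, 224, 224, 3)))
print(x.shape, x6.shape)  # (1, 112, 112, 128) and (1, 224, 224, 64)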

official/vision/beta/projects/yolo/modeling/building_blocks/_DarkConv.py

@@ -11,26 +11,27 @@ from yolo.modeling.functions.mish_activation import mish

@ks.utils.register_keras_serializable(package='yolo')
class DarkConv(ks.layers.Layer):
  def __init__(
      self,
      filters=1,
      kernel_size=(1, 1),
      strides=(1, 1),
      padding='same',
      dilation_rate=(1, 1),
      use_bias=True,
      kernel_initializer='glorot_uniform',
      bias_initializer='zeros',
      bias_regularizer=None,
      weight_decay=None,  # default; find where it is stated
      use_bn=True,
      use_sync_bn=False,
      norm_momentum=0.99,
      norm_epsilon=0.001,
      activation='leaky',
      leaky_alpha=0.1,
      **kwargs):
    '''
    Modified Convolution layer to match that of the DarkNet Library
    Args:

@@ -56,120 +57,118 @@ class DarkConv(ks.layers.Layer):

    '''
    # convolution params
    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._padding = padding
    self._dilation_rate = dilation_rate
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer

    # batchnorm params
    self._use_bn = use_bn
    if self._use_bn:
      self._use_bias = False
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    if tf.keras.backend.image_data_format() == 'channels_last':
      # format: (batch_size, height, width, channels)
      self._bn_axis = -1
    else:
      # format: (batch_size, channels, width, height)
      self._bn_axis = 1

    # activation params
    if activation is None:
      self._activation = 'linear'
    else:
      self._activation = activation
    self._leaky_alpha = leaky_alpha

    super(DarkConv, self).__init__(**kwargs)
    return

  def build(self, input_shape):
    kernel_size = self._kernel_size if type(
        self._kernel_size) == int else self._kernel_size[0]
    if self._padding == "same" and kernel_size != 1:
      self._zeropad = ks.layers.ZeroPadding2D(
          ((1, 1), (1, 1)))  # symmetric padding
    else:
      self._zeropad = Identity()

    self.conv = ks.layers.Conv2D(
        filters=self._filters,
        kernel_size=self._kernel_size,
        strides=self._strides,
        padding="valid",  # self._padding,
        dilation_rate=self._dilation_rate,
        use_bias=self._use_bias,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._weight_decay,
        bias_regularizer=self._bias_regularizer)
    # self.conv = tf.nn.convolution(filters=self._filters, strides=self._strides, padding=self._padding

    if self._use_bn:
      if self._use_sync_bn:
        self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
            momentum=self._norm_moment,
            epsilon=self._norm_epsilon,
            axis=self._bn_axis)
      else:
        self.bn = ks.layers.BatchNormalization(momentum=self._norm_moment,
                                               epsilon=self._norm_epsilon,
                                               axis=self._bn_axis)
    else:
      self.bn = Identity()

    if self._activation == 'leaky':
      alpha = {"alpha": self._leaky_alpha}
      self._activation_fn = partial(tf.nn.leaky_relu, **alpha)
    elif self._activation == 'mish':
      self._activation_fn = mish()
    else:
      self._activation_fn = ks.layers.Activation(activation=self._activation)

    super(DarkConv, self).build(input_shape)
    return

  def call(self, inputs):
    x = self._zeropad(inputs)
    x = self.conv(x)
    x = self.bn(x)
    x = self._activation_fn(x)
    return x

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "kernel_size": self._kernel_size,
        "strides": self._strides,
        "padding": self._padding,
        "dilation_rate": self._dilation_rate,
        "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "l2_regularization": self._l2_regularization,  # note: never set in __init__
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._activation,
        "leaky_alpha": self._leaky_alpha
    }
    layer_config.update(super(DarkConv, self).get_config())
    return layer_config

  def __repr__(self):
    return repr(self.get_config())
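
DarkConv chains explicit zero-padding, a valid-padded Conv2D, (sync) batch norm, and the selected activation, so it drops in wherever a conv-BN-activation stack is needed. A minimal sketch, assuming the yolo package (and its mish activation import) is available:

import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.building_blocks import DarkConv

layer = DarkConv(filters=64, kernel_size=(3, 3), strides=(2, 2),
                 padding='same', activation='leaky')
y = layer(tf.ones((1, 224, 224, 3)))  # zero-pad -> conv -> batch norm -> leaky ReLU
print(y.shape)  # (1, 112, 112, 64)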

official/vision/beta/projects/yolo/modeling/building_blocks/_DarkResidual.py

@@ -7,24 +7,25 @@ from ._Identity import Identity

@ks.utils.register_keras_serializable(package='yolo')
class DarkResidual(ks.layers.Layer):
  def __init__(
      self,
      filters=1,
      filter_scale=2,
      use_bias=True,
      kernel_initializer='glorot_uniform',
      bias_initializer='zeros',
      weight_decay=None,
      bias_regularizer=None,
      use_bn=True,
      use_sync_bn=False,
      norm_momentum=0.99,
      norm_epsilon=0.001,
      activation='leaky',
      leaky_alpha=0.1,
      sc_activation='linear',
      downsample=False,
      **kwargs):
    '''
    DarkNet block with Residual connection for Yolo v3 Backbone
    Args:

@@ -46,113 +47,112 @@ class DarkResidual(ks.layers.Layer):

      **kwargs: Keyword Arguments
    '''
    # downsample
    self._downsample = downsample

    # darkconv params
    self._filters = filters
    self._filter_scale = filter_scale
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._weight_decay = weight_decay

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha
    self._sc_activation = sc_activation

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    if self._downsample:
      self._dconv = DarkConv(filters=self._filters,
                             kernel_size=(3, 3),
                             strides=(2, 2),
                             padding='same',
                             use_bias=self._use_bias,
                             kernel_initializer=self._kernel_initializer,
                             bias_initializer=self._bias_initializer,
                             bias_regularizer=self._bias_regularizer,
                             use_bn=self._use_bn,
                             use_sync_bn=self._use_sync_bn,
                             norm_momentum=self._norm_moment,
                             norm_epsilon=self._norm_epsilon,
                             activation=self._conv_activation,
                             weight_decay=self._weight_decay,
                             leaky_alpha=self._leaky_alpha)
    else:
      self._dconv = Identity()

    self._conv1 = DarkConv(filters=self._filters // self._filter_scale,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           padding='same',
                           use_bias=self._use_bias,
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._conv_activation,
                           weight_decay=self._weight_decay,
                           leaky_alpha=self._leaky_alpha)
    self._conv2 = DarkConv(filters=self._filters,
                           kernel_size=(3, 3),
                           strides=(1, 1),
                           padding='same',
                           use_bias=self._use_bias,
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._conv_activation,
                           weight_decay=self._weight_decay,
                           leaky_alpha=self._leaky_alpha)

    self._shortcut = ks.layers.Add()
    self._activation_fn = ks.layers.Activation(
        activation=self._sc_activation)

    super().build(input_shape)
    return

  def call(self, inputs):
    shortcut = self._dconv(inputs)
    x = self._conv1(shortcut)
    x = self._conv2(x)
    x = self._shortcut([x, shortcut])
    return self._activation_fn(x)

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "weight_decay": self._weight_decay,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
        "downsample": self._downsample
    }
    layer_config.update(super().get_config())
    return layer_config
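
DarkResidual is the standard Darknet bottleneck: an optional strided DarkConv on the shortcut, a 1x1 then a 3x3 conv, an Add, and the shortcut activation. A sketch whose shapes follow the pass-through test further down:

import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.building_blocks import DarkResidual

block = DarkResidual(filters=64, filter_scale=2, downsample=True)
y = block(tf.ones((1, 224, 224, 64)))
print(y.shape)  # (1, 112, 112, 64): the downsample conv halves the spatial dims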

official/vision/beta/projects/yolo/modeling/building_blocks/_DarkTiny.py

@@ -6,98 +6,100 @@ from ._DarkConv import DarkConv

@ks.utils.register_keras_serializable(package='yolo')
class DarkTiny(ks.layers.Layer):
  def __init__(
      self,
      filters=1,
      use_bias=True,
      strides=2,
      kernel_initializer='glorot_uniform',
      bias_initializer='zeros',
      bias_regularizer=None,
      weight_decay=None,  # default; find where it is stated
      use_bn=True,
      use_sync_bn=False,
      norm_momentum=0.99,
      norm_epsilon=0.001,
      activation='leaky',
      leaky_alpha=0.1,
      sc_activation='linear',
      **kwargs):
    # darkconv params
    self._filters = filters
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._strides = strides
    self._weight_decay = weight_decay

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha
    self._sc_activation = sc_activation

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    # if self._strides == 2:
    #   self._zeropad = ks.layers.ZeroPadding2D(((1,0), (1,0)))
    #   padding = "valid"
    # else:
    #   self._zeropad = ks.layers.ZeroPadding2D(((0,1), (0,1)))#nn_blocks.Identity()#ks.layers.ZeroPadding2D(((1,0), (1,0)))
    #   padding = "valid"
    self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
                                              strides=self._strides,
                                              padding="same",
                                              data_format=None)
    self._convlayer = DarkConv(filters=self._filters,
                               kernel_size=(3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=self._use_bias,
                               kernel_initializer=self._kernel_initializer,
                               bias_initializer=self._bias_initializer,
                               bias_regularizer=self._bias_regularizer,
                               weight_decay=self._weight_decay,
                               use_bn=self._use_bn,
                               use_sync_bn=self._use_sync_bn,
                               norm_momentum=self._norm_moment,
                               norm_epsilon=self._norm_epsilon,
                               activation=self._conv_activation,
                               leaky_alpha=self._leaky_alpha)
    super().build(input_shape)
    return

  def call(self, inputs):
    output = self._maxpool(inputs)
    output = self._convlayer(output)
    return output

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    # note: self._l2_regularization is referenced here but never set in __init__
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "strides": self._strides,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "l2_regularization": self._l2_regularization,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
    }
    layer_config.update(super().get_config())
    return layer_config
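
DarkTiny is simply a max-pool followed by one 3x3 DarkConv, which is how the tiny backbone downsamples between stages. A brief sketch:

import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.building_blocks import DarkTiny

block = DarkTiny(filters=128, strides=2)
y = block(tf.ones((1, 224, 224, 64)))  # maxpool (stride 2), then 3x3 DarkConv
print(y.shape)  # (1, 112, 112, 128)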

official/vision/beta/projects/yolo/modeling/building_blocks/__init__.py

@@ -4,4 +4,3 @@ from ._DarkTiny import DarkTiny

from ._CSPConnect import CSPConnect
from ._CSPDownSample import CSPDownSample
from ._CSPTiny import CSPTiny

official/vision/beta/projects/yolo/modeling/tests/test_CSPConnect.py

@@ -8,48 +8,49 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConne

class CSPConnect(tf.test.TestCase, parameterized.TestCase):
  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, filter_reduce=mod)
    test_layer2 = layer_companion(filters=filters, filter_reduce=mod)
    outx, px = test_layer(x)
    outx = test_layer2([outx, px])
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width // 2), np.ceil(height // 2), (filters)])
    return

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 128, 2))
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, filter_reduce=mod)
    path_layer = layer_companion(filters, filter_reduce=mod)

    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, width, height, filters),
                                       dtype=tf.float32))
    y = tf.Variable(initial_value=init(shape=(1, int(np.ceil(width // 2)),
                                              int(np.ceil(height // 2)),
                                              filters),
                                       dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat, x_prev = test_layer(x)
      x_hat = path_layer([x_hat, x_prev])
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()

official/vision/beta/projects/yolo/modeling/tests/test_CSPDownSample.py

@@ -6,48 +6,49 @@ from absl.testing import parameterized

from official.vision.beta.projects.yolo.modeling.building_blocks import CSPDownSample as layer
from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConnect as layer_companion


class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, filter_reduce=mod)
    outx, px = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width // 2), np.ceil(height // 2), (filters / mod)])
    return

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 128, 2))
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, filter_reduce=mod)
    path_layer = layer_companion(filters, filter_reduce=mod)

    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, width, height, filters),
                                       dtype=tf.float32))
    y = tf.Variable(initial_value=init(shape=(1, int(np.ceil(width // 2)),
                                              int(np.ceil(height // 2)),
                                              filters),
                                       dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat, x_prev = test_layer(x)
      x_hat = path_layer([x_hat, x_prev])
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
official/vision/beta/projects/yolo/modeling/tests/test_DarkConv.py View file @ e43d75f3
@@ -5,67 +5,68 @@ from absl.testing import parameterized
from official.vision.beta.projects.yolo.modeling.building_blocks import DarkConv


class DarkConvTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(
      ("valid", (3, 3), "valid", (1, 1)),
      ("same", (3, 3), "same", (1, 1)),
      ("downsample", (3, 3), "same", (2, 2)),
      ("test", (1, 1), "valid", (1, 1)))
  def test_pass_through(self, kernel_size, padding, strides):
    if padding == "same":
      pad_const = 1
    else:
      pad_const = 0
    x = ks.Input(shape=(224, 224, 3))
    test_layer = DarkConv(
        filters=64,
        kernel_size=kernel_size,
        padding=padding,
        strides=strides,
        trainable=False)
    outx = test_layer(x)
    print(outx.shape.as_list())
    test = [
        None,
        int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),
        int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1),
        64
    ]
    print(test)
    self.assertAllEqual(outx.shape.as_list(), test)
    return

  @parameterized.named_parameters(("filters", 3))
  def test_gradient_pass_though(self, filters):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    with tf.device("/CPU:0"):
      test_layer = DarkConv(filters, kernel_size=(3, 3), padding="same")

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, 224, 224, 3), dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, 224, 224, filters), dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return

  # @parameterized.named_parameters(("filters", 3), ("filters", 20), ("filters", 512))
  # def test_time(self, filters):
  #   # finish the test for time
  #   dataset = tfds.load("mnist")
  #   model = ks.Sequential([
  #       DarkConv(7, kernel_size=(3,3), strides = (2,2), activation='relu'),
  #       DarkConv(10, kernel_size=(3,3), strides = (2,2), activation='relu'),
  #       DarkConv(filters, kernel_size=(3,3), strides = (1,1), activation='relu'),
  #       DarkConv(9, kernel_size=(3,3), strides = (2,2), activation='relu'),
  #       ks.layers.GlobalAveragePooling2D(),
  #       ks.layers.Dense(10, activation='softmax')], name='test')
  #   return


if __name__ == "__main__":
  tf.test.main()
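The expected-shape list in test_pass_through is the standard convolution output-size formula, out = floor((in - k + 2p) / s) + 1, with p = 1 when padding is "same". A quick worked check of the "downsample" parameterization (my arithmetic, not part of the commit):

# Worked check of the shape arithmetic in test_pass_through:
# out = floor((in - k + 2 * p) / s) + 1
in_size, k, p, s = 224, 3, 1, 2      # the "downsample" parameterization
out = int((in_size - k + 2 * p) / s + 1)
assert out == 112                    # a stride-2 "same" conv halves 224 -> 112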
official/vision/beta/projects/yolo/modeling/tests/test_DarkResidual.py View file @ e43d75f3
@@ -7,54 +7,55 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import DarkResi


class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, False),
                                  ("downsample", 223, 223, 32, True),
                                  ("oddball", 223, 223, 32, False))
  def test_pass_through(self, width, height, filters, downsample):
    mod = 1
    if downsample:
      mod = 2
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, downsample=downsample)
    outx = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width / mod),
         np.ceil(height / mod), filters])
    return

  @parameterized.named_parameters(("same", 64, 224, 224, False),
                                  ("downsample", 32, 223, 223, True),
                                  ("oddball", 32, 223, 223, False))
  def test_gradient_pass_though(self, filters, width, height, downsample):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, downsample=downsample)

    if downsample:
      mod = 2
    else:
      mod = 1

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, filters),
                           dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, int(np.ceil(width / mod)),
                                  int(np.ceil(height / mod)), filters),
                           dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
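The 223x223 "downsample" and "oddball" cases exercise odd spatial sizes deliberately: with stride-2 downsampling the expected dimension is np.ceil(width / 2), which rounds up where floor division would not. An illustrative check of that arithmetic (my numbers, not from the diff):

import numpy as np

# Expected-shape arithmetic from test_pass_through for the 223x223 cases.
width, mod = 223, 2                       # "downsample": stride-2 block
assert int(np.ceil(width / mod)) == 112   # rounds up; floor division gives 111
width, mod = 223, 1                       # "oddball": no downsampling
assert int(np.ceil(width / mod)) == 223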
official/vision/beta/projects/yolo/modeling/tests/test_DarkTiny.py View file @ e43d75f3
@@ -7,44 +7,44 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import DarkTiny


class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("middle", 224, 224, 64, 2),
                                  ("last", 224, 224, 1024, 1))
  def test_pass_through(self, width, height, filters, strides):
    x = ks.Input(shape=(width, height, filters))
    test_layer = DarkTiny(filters=filters, strides=strides)
    outx = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertEqual(width % strides, 0, msg="width % strides != 0")
    self.assertEqual(height % strides, 0, msg="height % strides != 0")
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, width // strides, height // strides, filters])
    return

  @parameterized.named_parameters(("middle", 224, 224, 64, 2),
                                  ("last", 224, 224, 1024, 1))
  def test_gradient_pass_though(self, width, height, filters, strides):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = DarkTiny(filters=filters, strides=strides)

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, filters),
                           dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(shape=(1, width // strides, height // strides,
                                  filters),
                           dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
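Unlike DarkResidual above, the expected shapes here use integer floor division, so the test first asserts that the stride divides each spatial dimension exactly; otherwise [None, width // strides, height // strides, filters] could silently disagree with the layer's real output. A quick check over the two parameterizations used (illustrative only):

# Both DarkTiny parameterizations satisfy the divisibility precondition.
for width, height, strides in [(224, 224, 2), (224, 224, 1)]:
  assert width % strides == 0 and height % strides == 0
  print(width // strides, height // strides)  # 112 112, then 224 224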