ModelZoo / ResNet50_tensorflow
"vscode:/vscode.git/clone" did not exist on "364b4f24a09a2d9ce3b924461a673e5cc2be3976"
Commit e43d75f3, authored Oct 21, 2020 by anivegesana
Lint YOLO backbone and building block code
Parent: 5122a448
Showing 14 changed files with 1117 additions and 1085 deletions.
official/vision/beta/projects/yolo/configs/backbones.py                          +5    -3
official/vision/beta/projects/yolo/modeling/backbones/Darknet.py               +267  -245
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPConnect.py      +63   -62
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPDownSample.py   +73   -72
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPTiny.py        +138  -136
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkConv.py       +136  -137
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkResidual.py   +118  -118
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkTiny.py        +88   -86
official/vision/beta/projects/yolo/modeling/building_blocks/__init__.py          +0    -1
official/vision/beta/projects/yolo/modeling/tests/test_CSPConnect.py            +43   -42
official/vision/beta/projects/yolo/modeling/tests/test_CSPDownSample.py         +42   -41
official/vision/beta/projects/yolo/modeling/tests/test_DarkConv.py              +57   -56
official/vision/beta/projects/yolo/modeling/tests/test_DarkResidual.py          +49   -48
official/vision/beta/projects/yolo/modeling/tests/test_DarkTiny.py              +38   -38
official/vision/beta/projects/yolo/configs/backbones.py

@@ -6,11 +6,13 @@ from official.modeling import hyperparams
```python
from official.vision.beta.configs import backbones


@dataclasses.dataclass
class DarkNet(hyperparams.Config):
  """DarkNet config."""
  model_id: str = "darknet53"


@dataclasses.dataclass
class Backbone(backbones.Backbone):
  darknet: DarkNet = DarkNet()
```
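For orientation, a minimal sketch of overriding this config; the `type='darknet'` oneof key is an assumption inferred from the `register_backbone_builder('darknet')` call in Darknet.py below, not something this hunk shows:

```python
# assumes Backbone/DarkNet from the file above are importable
cfg = Backbone(type='darknet', darknet=DarkNet(model_id='cspdarknet53'))
print(cfg.darknet.model_id)  # cspdarknet53
```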
official/vision/beta/projects/yolo/modeling/backbones/Darknet.py

@@ -5,11 +5,13 @@ import collections
```python
from official.vision.beta.modeling.backbones import factory
from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks


# builder required classes
class BlockConfig(object):

  def __init__(self, layer, stack, reps, bottleneck, filters, kernel_size,
               strides, padding, activation, route, output_name, is_output):
    '''
    get layer config to make code more readable

    Args:
```

...

@@ -20,70 +22,84 @@ class BlockConfig(object):
```python
      downsample: boolean, to down sample the input width and height
      output: boolean, true if the layer is required as an output
    '''
    self.layer = layer
    self.stack = stack
    self.repetitions = reps
    self.bottleneck = bottleneck
    self.filters = filters
    self.kernel_size = kernel_size
    self.strides = strides
    self.padding = padding
    self.activation = activation
    self.route = route
    self.output_name = output_name
    self.is_output = is_output
    return
```
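Each row in the backbone tables defined further down in this file is simply this constructor's positional argument list, so a spec can be decoded by splatting a row; a minimal sketch using the first CSPDARKNET53 row:

```python
# LISTNAMES order: layer, stack, reps, bottleneck, filters, kernel_size,
# strides, padding, activation, route, output_name, is_output
row = ["DarkConv", None, 1, False, 32, 3, 1, "same", "mish", -1, 0, False]
spec = BlockConfig(*row)
print(spec.layer, spec.filters, spec.activation)  # DarkConv 32 mish
```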
```python
def build_block_specs(config):
  specs = []
  for layer in config:
    specs.append(BlockConfig(*layer))
  return specs


def darkconv_config_todict(config, kwargs):
  dictvals = {
      "filters": config.filters,
      "kernel_size": config.kernel_size,
      "strides": config.strides,
      "padding": config.padding
  }
  dictvals.update(kwargs)
  return dictvals


def darktiny_config_todict(config, kwargs):
  dictvals = {"filters": config.filters, "strides": config.strides}
  dictvals.update(kwargs)
  return dictvals


def maxpool_config_todict(config, kwargs):
  return {
      "pool_size": config.kernel_size,
      "strides": config.strides,
      "padding": config.padding,
      "name": kwargs["name"]
  }
```
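These translators turn a BlockConfig into exactly the keyword arguments each layer type accepts, then merge in the caller's extra kwargs (typically the shared defaults plus a name). A sketch of the resulting dict:

```python
spec = BlockConfig("DarkConv", None, 1, False, 32, 3, 1, "same",
                   "mish", -1, 0, False)
print(darkconv_config_todict(spec, {"name": "conv_0"}))
# {'filters': 32, 'kernel_size': 3, 'strides': 1, 'padding': 'same',
#  'name': 'conv_0'}
```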
```python
class layer_registry(object):

  def __init__(self):
    self._layer_dict = {
        "DarkTiny": (nn_blocks.DarkTiny, darktiny_config_todict),
        "DarkConv": (nn_blocks.DarkConv, darkconv_config_todict),
        "MaxPool": (tf.keras.layers.MaxPool2D, maxpool_config_todict)
    }
    return

  def _get_layer(self, key):
    return self._layer_dict[key]

  def __call__(self, config, kwargs):
    layer, get_param_dict = self._get_layer(config.layer)
    param_dict = get_param_dict(config, kwargs)
    return layer(**param_dict)
```
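The registry keeps `_build_block` in the Darknet class generic: it looks up the layer class and its translator by `config.layer` and instantiates the layer in one call. A sketch (the kwargs dict stands in for Darknet's `_default_dict`):

```python
registry = layer_registry()
spec = BlockConfig("MaxPool", None, 1, False, None, 2, 2, "same",
                   None, -1, 0, False)
pool = registry(spec, {"name": "pool_0"})
# pool is a tf.keras.layers.MaxPool2D(pool_size=2, strides=2,
#                                     padding="same", name="pool_0")
```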
```python
# model configs
LISTNAMES = [
    "default_layer_name", "level_type", "number_of_layers_in_level",
    "bottleneck", "filters", "kernal_size", "strides", "padding",
    "default_activation", "route", "level/name", "is_output"
]

CSPDARKNET53 = {
    "list_names": LISTNAMES,
    "splits": {"backbone_split": 106,
               "neck_split": 138},
    "backbone": [
        ["DarkConv", None, 1, False, 32, 3, 1, "same", "mish", -1, 0, False],  # 1
        ["DarkRes", "csp", 1, True, 64, None, None, None, "mish", -1, 1, False],  # 3
```

...

@@ -91,7 +107,7 @@ CSPDARKNET53 = {
```python
        ["DarkRes", "csp", 8, False, 256, None, None, None, "mish", -1, 3, True],
        ["DarkRes", "csp", 8, False, 512, None, None, None, "mish", -1, 4, True],  # 3
        ["DarkRes", "csp", 4, False, 1024, None, None, None, "mish", -1, 5, True],  # 6 #route
    ]
}

DARKNET53 = {
```

...

@@ -103,8 +119,8 @@ DARKNET53 = {
```python
        ["DarkRes", "residual", 2, False, 128, None, None, None, "leaky", -1, 2, False],  # 2
        ["DarkRes", "residual", 8, False, 256, None, None, None, "leaky", -1, 3, True],
        ["DarkRes", "residual", 8, False, 512, None, None, None, "leaky", -1, 4, True],  # 3
        ["DarkRes", "residual", 4, False, 1024, None, None, None, "leaky", -1, 5, True],  # 6
    ]
}

CSPDARKNETTINY = {
```

...

@@ -117,7 +133,7 @@ CSPDARKNETTINY = {
```python
        ["CSPTiny", "csp_tiny", 1, False, 128, 3, 2, "same", "leaky", -1, 3, False],  # 3
        ["CSPTiny", "csp_tiny", 1, False, 256, 3, 2, "same", "leaky", -1, 4, True],  # 3
        ["DarkConv", None, 1, False, 512, 3, 1, "same", "leaky", -1, 5, True],  # 1
    ]
}

DARKNETTINY = {
```

...

@@ -131,204 +147,210 @@ DARKNETTINY = {
```python
        ["DarkTiny", None, 1, False, 256, 3, 2, "same", "leaky", -1, 4, True],
        ["DarkTiny", None, 1, False, 512, 3, 2, "same", "leaky", -1, 5, False],  # 3
        ["DarkTiny", None, 1, False, 1024, 3, 1, "same", "leaky", -1, 5, True],  # 6 #route
    ]
}

BACKBONES = {
    "darknettiny": DARKNETTINY,
    "darknet53": DARKNET53,
    "cspdarknet53": CSPDARKNET53,
    "cspdarknettiny": CSPDARKNETTINY
}
```
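The tables are consumed through `build_block_specs` above; a sketch of reading one backbone definition back out:

```python
specs = build_block_specs(CSPDARKNET53["backbone"])
splits = CSPDARKNET53["splits"]
print(specs[0].layer, specs[0].filters)  # DarkConv 32
print(splits)  # {'backbone_split': 106, 'neck_split': 138}
```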
```python
@ks.utils.register_keras_serializable(package='yolo')
class Darknet(ks.Model):

  def __init__(self,
               model_id="darknet53",
               input_shape=tf.keras.layers.InputSpec(
                   shape=[None, None, None, 3]),
               min_size=None,
               max_size=5,
               activation=None,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               kernel_initializer='glorot_uniform',
               kernel_regularizer=None,
               bias_regularizer=None,
               config=None,
               **kwargs):
    layer_specs, splits = Darknet.get_model_config(model_id)

    self._model_name = model_id
    self._splits = splits
    self._input_shape = input_shape
    self._registry = layer_registry()

    # default layer look up
    self._min_size = min_size
    self._max_size = max_size
    self._output_specs = None

    self._kernel_initializer = kernel_initializer
    self._bias_regularizer = bias_regularizer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._use_sync_bn = use_sync_bn
    self._activation = activation
    self._weight_decay = kernel_regularizer

    self._default_dict = {
        "kernel_initializer": self._kernel_initializer,
        "weight_decay": self._weight_decay,
        "bias_regularizer": self._bias_regularizer,
        "norm_momentum": self._norm_momentum,
        "norm_epsilon": self._norm_epsilon,
        "use_sync_bn": self._use_sync_bn,
        "activation": self._activation,
        "name": None
    }

    inputs = ks.layers.Input(shape=self._input_shape.shape[1:])
    output = self._build_struct(layer_specs, inputs)
    super().__init__(inputs=inputs, outputs=output, name=self._model_name)
    return
```
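Because `__init__` hands the traced endpoints straight to `ks.Model`, the whole backbone is a functional model as soon as it is constructed; a quick sketch (endpoint keys come from the tables' level/name column):

```python
model = Darknet(model_id="darknet53",
                input_shape=tf.keras.layers.InputSpec(
                    shape=[None, 416, 416, 3]))
print(model.output_specs)  # {3: ..., 4: ..., 5: ...} endpoint shapes
```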
```python
  @property
  def input_specs(self):
    return self._input_shape

  @property
  def output_specs(self):
    return self._output_specs

  @property
  def splits(self):
    return self._splits

  def _build_struct(self, net, inputs):
    endpoints = collections.OrderedDict()
    stack_outputs = [inputs]
    for i, config in enumerate(net):
      if config.stack == None:
        x = self._build_block(stack_outputs[config.route],
                              config,
                              name=f"{config.layer}_{i}")
        stack_outputs.append(x)
      elif config.stack == "residual":
        x = self._residual_stack(stack_outputs[config.route],
                                 config,
                                 name=f"{config.layer}_{i}")
        stack_outputs.append(x)
      elif config.stack == "csp":
        x = self._csp_stack(stack_outputs[config.route],
                            config,
                            name=f"{config.layer}_{i}")
        stack_outputs.append(x)
      elif config.stack == "csp_tiny":
        x_pass, x = self._tiny_stack(stack_outputs[config.route],
                                     config,
                                     name=f"{config.layer}_{i}")
        stack_outputs.append(x_pass)
      if (config.is_output and
          self._min_size == None):  # or isinstance(config.output_name, str):
        endpoints[config.output_name] = x
      elif (self._min_size != None and
            config.output_name >= self._min_size and
            config.output_name <= self._max_size):
        endpoints[config.output_name] = x

    self._output_specs = {
        l: endpoints[l].get_shape() for l in endpoints.keys()
    }
    return endpoints

  def _get_activation(self, activation):
    if self._activation == None:
      return activation
    else:
      return self._activation

  def _csp_stack(self, inputs, config, name):
    if config.bottleneck:
      csp_filter_reduce = 1
      residual_filter_reduce = 2
      scale_filters = 1
    else:
      csp_filter_reduce = 2
      residual_filter_reduce = 1
      scale_filters = 2
    self._default_dict["activation"] = self._get_activation(config.activation)
    self._default_dict["name"] = f"{name}_csp_down"
    x, x_route = nn_blocks.CSPDownSample(filters=config.filters,
                                         filter_reduce=csp_filter_reduce,
                                         **self._default_dict)(inputs)
    for i in range(config.repetitions):
      self._default_dict["name"] = f"{name}_{i}"
      x = nn_blocks.DarkResidual(filters=config.filters // scale_filters,
                                 filter_scale=residual_filter_reduce,
                                 **self._default_dict)(x)
    self._default_dict["name"] = f"{name}_csp_connect"
    output = nn_blocks.CSPConnect(filters=config.filters,
                                  filter_reduce=csp_filter_reduce,
                                  **self._default_dict)([x, x_route])
    self._default_dict["activation"] = self._activation
    self._default_dict["name"] = None
    return output
```
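To make the filter bookkeeping concrete, the two branches above play out as follows for rows of CSPDARKNET53:

```python
# Bottleneck level  ["DarkRes", "csp", 1, True, 64, ...]:
#   csp_filter_reduce = 1 -> CSPDownSample keeps all 64 filters
#   scale_filters = 1     -> each DarkResidual runs at 64 // 1 = 64 filters
# Regular level     ["DarkRes", "csp", 8, False, 256, ...]:
#   csp_filter_reduce = 2 -> the CSP split halves to 128 filters
#   scale_filters = 2     -> each DarkResidual runs at 256 // 2 = 128 filters
```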
```python
  def _tiny_stack(self, inputs, config, name):
    self._default_dict["activation"] = self._get_activation(config.activation)
    self._default_dict["name"] = f"{name}_tiny"
    x, x_route = nn_blocks.CSPTiny(filters=config.filters,
                                   **self._default_dict)(inputs)
    self._default_dict["activation"] = self._activation
    self._default_dict["name"] = None
    return x, x_route

  def _residual_stack(self, inputs, config, name):
    self._default_dict["activation"] = self._get_activation(config.activation)
    self._default_dict["name"] = f"{name}_residual_down"
    x = nn_blocks.DarkResidual(filters=config.filters,
                               downsample=True,
                               **self._default_dict)(inputs)
    for i in range(config.repetitions - 1):
      self._default_dict["name"] = f"{name}_{i}"
      x = nn_blocks.DarkResidual(filters=config.filters,
                                 **self._default_dict)(x)
    self._default_dict["activation"] = self._activation
    self._default_dict["name"] = None
    return x

  def _build_block(self, inputs, config, name):
    x = inputs
    i = 0
    self._default_dict["activation"] = self._get_activation(config.activation)
    while i < config.repetitions:
      self._default_dict["name"] = f"{name}_{i}"
      layer = self._registry(config, self._default_dict)
      x = layer(x)
      i += 1
    self._default_dict["activation"] = self._activation
    self._default_dict["name"] = None
    return x

  @staticmethod
  def get_model_config(name):
    name = name.lower()
    backbone = BACKBONES[name]["backbone"]
    splits = BACKBONES[name]["splits"]
    return build_block_specs(backbone), splits
```
```python
@factory.register_backbone_builder('darknet')
def build_darknet(
    input_specs: tf.keras.layers.InputSpec,
    model_config,
    l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
  backbone_type = model_config.backbone.type
  backbone_cfg = model_config.backbone.get()
  norm_activation_config = model_config.norm_activation
  return Darknet(model_id=backbone_cfg.model_id,
                 input_shape=input_specs,
                 activation=norm_activation_config.activation,
                 use_sync_bn=norm_activation_config.use_sync_bn,
                 norm_momentum=norm_activation_config.norm_momentum,
                 norm_epsilon=norm_activation_config.norm_epsilon,
                 kernel_regularizer=l2_regularizer)


# if __name__ == "__main__":
```

...
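A sketch of using the backbone directly, bypassing the experiment config that `build_darknet` normally unpacks:

```python
model = Darknet(model_id="cspdarknet53",
                input_shape=tf.keras.layers.InputSpec(
                    shape=[None, 512, 512, 3]),
                activation="mish")
for level, shape in model.output_specs.items():
    print(level, shape)  # levels 3, 4, 5 at increasing stride
```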
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPConnect.py

@@ -5,69 +5,70 @@ from ._DarkConv import DarkConv
```python
@ks.utils.register_keras_serializable(package='yolo')
class CSPConnect(ks.layers.Layer):

  def __init__(self,
               filters,
               filter_reduce=2,
               activation="mish",
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    super().__init__(**kwargs)
    # layer params
    self._filters = filters
    self._filter_reduce = filter_reduce
    self._activation = activation

    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon
    return

  def build(self, input_shape):
    self._conv1 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._concat = ks.layers.Concatenate(axis=-1)
    self._conv2 = DarkConv(filters=self._filters,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    return

  def call(self, inputs):
    x_prev, x_csp = inputs
    x = self._conv1(x_prev)
    x = self._concat([x, x_csp])
    x = self._conv2(x)
    return x
```
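CSPConnect consumes the (processed, skip) pair that CSPDownSample produces: a 1×1 reduce on the processed path, channel concat with the skip half, then a 1×1 projection back to `filters`. A sketch with made-up shapes:

```python
import tensorflow as tf
# assumes CSPConnect is importable from this package

x_prev = tf.ones([1, 16, 16, 32])  # processed CSP path
x_csp = tf.ones([1, 16, 16, 32])   # skip half from CSPDownSample
out = CSPConnect(filters=64)([x_prev, x_csp])
print(out.shape)  # (1, 16, 16, 64)
```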
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPDownSample.py

@@ -5,80 +5,81 @@ from ._DarkConv import DarkConv
```python
@ks.utils.register_keras_serializable(package='yolo')
class CSPDownSample(ks.layers.Layer):

  def __init__(self,
               filters,
               filter_reduce=2,
               activation="mish",
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    super().__init__(**kwargs)
    # layer params
    self._filters = filters
    self._filter_reduce = filter_reduce
    self._activation = activation

    # convolution params
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon
    return

  def build(self, input_shape):
    self._conv1 = DarkConv(filters=self._filters,
                           kernel_size=(3, 3),
                           strides=(2, 2),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._conv2 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    self._conv3 = DarkConv(filters=self._filters // self._filter_reduce,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           weight_decay=self._weight_decay,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._activation)
    return

  def call(self, inputs):
    x = self._conv1(inputs)
    y = self._conv2(x)
    x = self._conv3(x)
    return (x, y)
```
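The block returns a pair: `conv3(conv1(x))` as the path that continues through the residual stack, and `conv2(conv1(x))` as the half routed around it into CSPConnect. A shape sketch:

```python
import tensorflow as tf
# assumes CSPDownSample is importable from this package

inputs = tf.ones([1, 32, 32, 3])
x, x_route = CSPDownSample(filters=64, filter_reduce=2)(inputs)
print(x.shape, x_route.shape)  # (1, 16, 16, 32) (1, 16, 16, 32)
```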
official/vision/beta/projects/yolo/modeling/building_blocks/_CSPTiny.py

@@ -3,152 +3,154 @@ import tensorflow as tf
```python
import tensorflow.keras as ks
from ._DarkConv import DarkConv


@ks.utils.register_keras_serializable(package='yolo')
class CSPTiny(ks.layers.Layer):

  def __init__(self,
               filters=1,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               group_id=1,
               groups=2,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               downsample=True,
               leaky_alpha=0.1,
               **kwargs):
    # darkconv params
    self._filters = filters
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._weight_decay = weight_decay
    self._groups = groups
    self._group_id = group_id
    self._downsample = downsample

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    self._convlayer1 = DarkConv(filters=self._filters,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer2 = DarkConv(filters=self._filters // 2,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer3 = DarkConv(filters=self._filters // 2,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._convlayer4 = DarkConv(filters=self._filters,
                                kernel_size=(1, 1),
                                strides=(1, 1),
                                padding='same',
                                use_bias=self._use_bias,
                                kernel_initializer=self._kernel_initializer,
                                bias_initializer=self._bias_initializer,
                                bias_regularizer=self._bias_regularizer,
                                weight_decay=self._weight_decay,
                                use_bn=self._use_bn,
                                use_sync_bn=self._use_sync_bn,
                                norm_momentum=self._norm_moment,
                                norm_epsilon=self._norm_epsilon,
                                activation=self._conv_activation,
                                leaky_alpha=self._leaky_alpha)
    self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
                                              strides=2,
                                              padding="same",
                                              data_format=None)
    super().build(input_shape)
    return

  def call(self, inputs):
    x1 = self._convlayer1(inputs)
    x2 = tf.split(x1, self._groups, axis=-1)
    x3 = self._convlayer2(x2[self._group_id])
    x4 = self._convlayer3(x3)
    x5 = tf.concat([x4, x3], axis=-1)
    x6 = self._convlayer4(x5)
    x = tf.concat([x1, x6], axis=-1)
    if self._downsample:
      x = self._maxpool(x)
    return x, x6

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "strides": self._strides,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "weight_decay": self._weight_decay,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
    }
    layer_config.update(super().get_config())
    return layer_config
```
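A sketch of the tiny CSP block's grouped split: the first conv output is split into `groups` chunks along channels, the `group_id` chunk is processed and re-concatenated, and the block returns both the (optionally max-pooled) full tensor and the inner branch kept as a route:

```python
import tensorflow as tf
# assumes CSPTiny is importable from this package

inputs = tf.ones([1, 64, 64, 3])
x, x_route = CSPTiny(filters=64, downsample=True)(inputs)
print(x.shape)        # (1, 32, 32, 128): concat of both halves, max-pooled
print(x_route.shape)  # (1, 64, 64, 64): inner branch used as a route
```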
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkConv.py

@@ -11,26 +11,27 @@ from yolo.modeling.functions.mish_activation import mish
```python
@ks.utils.register_keras_serializable(package='yolo')
class DarkConv(ks.layers.Layer):

  def __init__(self,
               filters=1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='same',
               dilation_rate=(1, 1),
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default: find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               **kwargs):
    '''
    Modified Convolution layer to match that of the DarkNet Library

    Args:
```

...

@@ -56,120 +57,118 @@ class DarkConv(ks.layers.Layer):
```python
    '''
    # convolution params
    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._padding = padding
    self._dilation_rate = dilation_rate
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._weight_decay = weight_decay
    self._bias_regularizer = bias_regularizer

    # batchnorm params
    self._use_bn = use_bn
    if self._use_bn:
      self._use_bias = False
    self._use_sync_bn = use_sync_bn
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    if tf.keras.backend.image_data_format() == 'channels_last':
      # format: (batch_size, height, width, channels)
      self._bn_axis = -1
    else:
      # format: (batch_size, channels, width, height)
      self._bn_axis = 1

    # activation params
    if activation is None:
      self._activation = 'linear'
    else:
      self._activation = activation
    self._leaky_alpha = leaky_alpha

    super(DarkConv, self).__init__(**kwargs)
    return

  def build(self, input_shape):
    kernel_size = self._kernel_size if type(
        self._kernel_size) == int else self._kernel_size[0]
    if self._padding == "same" and kernel_size != 1:
      self._zeropad = ks.layers.ZeroPadding2D(
          ((1, 1), (1, 1)))  # symmetric padding
    else:
      self._zeropad = Identity()
    self.conv = ks.layers.Conv2D(filters=self._filters,
                                 kernel_size=self._kernel_size,
                                 strides=self._strides,
                                 padding="valid",  # self._padding,
                                 dilation_rate=self._dilation_rate,
                                 use_bias=self._use_bias,
                                 kernel_initializer=self._kernel_initializer,
                                 bias_initializer=self._bias_initializer,
                                 kernel_regularizer=self._weight_decay,
                                 bias_regularizer=self._bias_regularizer)
    # self.conv = tf.nn.convolution(filters=self._filters, strides=self._strides, padding=self._padding

    if self._use_bn:
      if self._use_sync_bn:
        self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
            momentum=self._norm_moment,
            epsilon=self._norm_epsilon,
            axis=self._bn_axis)
      else:
        self.bn = ks.layers.BatchNormalization(momentum=self._norm_moment,
                                               epsilon=self._norm_epsilon,
                                               axis=self._bn_axis)
    else:
      self.bn = Identity()

    if self._activation == 'leaky':
      alpha = {"alpha": self._leaky_alpha}
      self._activation_fn = partial(tf.nn.leaky_relu, **alpha)
    elif self._activation == 'mish':
      self._activation_fn = mish()
    else:
      self._activation_fn = ks.layers.Activation(activation=self._activation)

    super(DarkConv, self).build(input_shape)
    return

  def call(self, inputs):
    x = self._zeropad(inputs)
    x = self.conv(x)
    x = self.bn(x)
    x = self._activation_fn(x)
    return x

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "kernel_size": self._kernel_size,
        "strides": self._strides,
        "padding": self._padding,
        "dilation_rate": self._dilation_rate,
        "use_bias": self._use_bias,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "bias_regularizer": self._bias_regularizer,
        "l2_regularization": self._l2_regularization,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._activation,
        "leaky_alpha": self._leaky_alpha
    }
    layer_config.update(super(DarkConv, self).get_config())
    return layer_config

  def __repr__(self):
    return repr(self.get_config())
```
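A sketch of the DarkConv contract: 'same' padding with a kernel larger than 1 is realized as explicit zero-padding plus a 'valid' conv, then batch norm, then the chosen activation:

```python
import tensorflow as tf
# assumes DarkConv is importable from this package

layer = DarkConv(filters=32, kernel_size=(3, 3), strides=(2, 2),
                 padding='same', activation='leaky')
out = layer(tf.ones([1, 224, 224, 3]))
print(out.shape)  # (1, 112, 112, 32)
```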
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkResidual.py

@@ -7,24 +7,25 @@ from ._Identity import Identity
```python
@ks.utils.register_keras_serializable(package='yolo')
class DarkResidual(ks.layers.Layer):

  def __init__(self,
               filters=1,
               filter_scale=2,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               weight_decay=None,
               bias_regularizer=None,
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               sc_activation='linear',
               downsample=False,
               **kwargs):
    '''
    DarkNet block with Residual connection for Yolo v3 Backbone

    Args:
```

...

@@ -46,113 +47,112 @@ class DarkResidual(ks.layers.Layer):
```python
      **kwargs: Keyword Arguments
    '''
    # downsample
    self._downsample = downsample

    # darkconv params
    self._filters = filters
    self._filter_scale = filter_scale
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._weight_decay = weight_decay

    # normal params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha
    self._sc_activation = sc_activation

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    if self._downsample:
      self._dconv = DarkConv(filters=self._filters,
                             kernel_size=(3, 3),
                             strides=(2, 2),
                             padding='same',
                             use_bias=self._use_bias,
                             kernel_initializer=self._kernel_initializer,
                             bias_initializer=self._bias_initializer,
                             bias_regularizer=self._bias_regularizer,
                             use_bn=self._use_bn,
                             use_sync_bn=self._use_sync_bn,
                             norm_momentum=self._norm_moment,
                             norm_epsilon=self._norm_epsilon,
                             activation=self._conv_activation,
                             weight_decay=self._weight_decay,
                             leaky_alpha=self._leaky_alpha)
    else:
      self._dconv = Identity()
    self._conv1 = DarkConv(filters=self._filters // self._filter_scale,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           padding='same',
                           use_bias=self._use_bias,
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._conv_activation,
                           weight_decay=self._weight_decay,
                           leaky_alpha=self._leaky_alpha)
    self._conv2 = DarkConv(filters=self._filters,
                           kernel_size=(3, 3),
                           strides=(1, 1),
                           padding='same',
                           use_bias=self._use_bias,
                           kernel_initializer=self._kernel_initializer,
                           bias_initializer=self._bias_initializer,
                           bias_regularizer=self._bias_regularizer,
                           use_bn=self._use_bn,
                           use_sync_bn=self._use_sync_bn,
                           norm_momentum=self._norm_moment,
                           norm_epsilon=self._norm_epsilon,
                           activation=self._conv_activation,
                           weight_decay=self._weight_decay,
                           leaky_alpha=self._leaky_alpha)

    self._shortcut = ks.layers.Add()
    self._activation_fn = ks.layers.Activation(activation=self._sc_activation)

    super().build(input_shape)
    return

  def call(self, inputs):
    shortcut = self._dconv(inputs)
    x = self._conv1(shortcut)
    x = self._conv2(x)
    x = self._shortcut([x, shortcut])
    return self._activation_fn(x)
```
def
get_config
(
self
):
def
get_config
(
self
):
# used to store/share parameters to reconsturct the model
# used to store/share parameters to reconsturct the model
layer_config
=
{
layer_config
=
{
"filters"
:
self
.
_filters
,
"filters"
:
self
.
_filters
,
"use_bias"
:
self
.
_use_bias
,
"use_bias"
:
self
.
_use_bias
,
"kernel_initializer"
:
self
.
_kernel_initializer
,
"kernel_initializer"
:
self
.
_kernel_initializer
,
"bias_initializer"
:
self
.
_bias_initializer
,
"bias_initializer"
:
self
.
_bias_initializer
,
"weight_decay"
:
self
.
_weight_decay
,
"weight_decay"
:
self
.
_weight_decay
,
"use_bn"
:
self
.
_use_bn
,
"use_bn"
:
self
.
_use_bn
,
"use_sync_bn"
:
self
.
_use_sync_bn
,
"use_sync_bn"
:
self
.
_use_sync_bn
,
"norm_moment"
:
self
.
_norm_moment
,
"norm_moment"
:
self
.
_norm_moment
,
"norm_epsilon"
:
self
.
_norm_epsilon
,
"norm_epsilon"
:
self
.
_norm_epsilon
,
"activation"
:
self
.
_conv_activation
,
"activation"
:
self
.
_conv_activation
,
"leaky_alpha"
:
self
.
_leaky_alpha
,
"leaky_alpha"
:
self
.
_leaky_alpha
,
"sc_activation"
:
self
.
_sc_activation
,
"sc_activation"
:
self
.
_sc_activation
,
"downsample"
:
self
.
_downsample
"downsample"
:
self
.
_downsample
}
}
layer_config
.
update
(
super
().
get_config
())
layer_config
.
update
(
super
().
get_config
())
return
layer_config
return
layer_config
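For orientation, a minimal sketch of how this block behaves at its two settings; the import path mirrors the test files in this commit, and the expected shapes are inferred from the build() code above rather than stated by the source:

    # Sketch: DarkResidual with and without the downsampling shortcut.
    import tensorflow as tf
    from official.vision.beta.projects.yolo.modeling.building_blocks import DarkResidual

    x = tf.random.normal((1, 224, 224, 64))

    # Identity shortcut: spatial size and channel count are preserved.
    print(DarkResidual(filters=64)(x).shape)                   # (1, 224, 224, 64)

    # Downsampling shortcut: the 3x3 stride-2 DarkConv halves H and W
    # before the 1x1 -> 3x3 bottleneck and the Add().
    print(DarkResidual(filters=64, downsample=True)(x).shape)  # (1, 112, 112, 64)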
official/vision/beta/projects/yolo/modeling/building_blocks/_DarkTiny.py
View file @ e43d75f3
...
@@ -6,98 +6,100 @@ from ._DarkConv import DarkConv
@ks.utils.register_keras_serializable(package='yolo')
class DarkTiny(ks.layers.Layer):

  def __init__(self,
               filters=1,
               use_bias=True,
               strides=2,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               bias_regularizer=None,
               weight_decay=None,  # default; find where it is stated
               use_bn=True,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='leaky',
               leaky_alpha=0.1,
               sc_activation='linear',
               **kwargs):
    # darkconv params
    self._filters = filters
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._bias_regularizer = bias_regularizer
    self._use_bn = use_bn
    self._use_sync_bn = use_sync_bn
    self._strides = strides
    self._weight_decay = weight_decay

    # normalization params
    self._norm_moment = norm_momentum
    self._norm_epsilon = norm_epsilon

    # activation params
    self._conv_activation = activation
    self._leaky_alpha = leaky_alpha
    self._sc_activation = sc_activation

    super().__init__(**kwargs)
    return

  def build(self, input_shape):
    # if self._strides == 2:
    #   self._zeropad = ks.layers.ZeroPadding2D(((1, 0), (1, 0)))
    #   padding = "valid"
    # else:
    #   self._zeropad = ks.layers.ZeroPadding2D(((0, 1), (0, 1)))  # nn_blocks.Identity()  # ks.layers.ZeroPadding2D(((1, 0), (1, 0)))
    #   padding = "valid"
    self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
                                              strides=self._strides,
                                              padding="same",
                                              data_format=None)
    self._convlayer = DarkConv(filters=self._filters,
                               kernel_size=(3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=self._use_bias,
                               kernel_initializer=self._kernel_initializer,
                               bias_initializer=self._bias_initializer,
                               bias_regularizer=self._bias_regularizer,
                               weight_decay=self._weight_decay,
                               use_bn=self._use_bn,
                               use_sync_bn=self._use_sync_bn,
                               norm_momentum=self._norm_moment,
                               norm_epsilon=self._norm_epsilon,
                               activation=self._conv_activation,
                               leaky_alpha=self._leaky_alpha)
    super().build(input_shape)
    return

  def call(self, inputs):
    output = self._maxpool(inputs)
    output = self._convlayer(output)
    return output

  def get_config(self):
    # used to store/share parameters to reconstruct the model
    layer_config = {
        "filters": self._filters,
        "use_bias": self._use_bias,
        "strides": self._strides,
        "kernel_initializer": self._kernel_initializer,
        "bias_initializer": self._bias_initializer,
        "weight_decay": self._weight_decay,
        "use_bn": self._use_bn,
        "use_sync_bn": self._use_sync_bn,
        "norm_moment": self._norm_moment,
        "norm_epsilon": self._norm_epsilon,
        "activation": self._conv_activation,
        "leaky_alpha": self._leaky_alpha,
        "sc_activation": self._sc_activation,
    }
    layer_config.update(super().get_config())
    return layer_config
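DarkTiny reduces to a stride-s max pool followed by a 3x3 'same' DarkConv, so output spatial dims are ceil(H/s) x ceil(W/s) with the channel count set by filters. A self-contained sketch of the same pool-then-conv pattern using only stock Keras layers (DarkConv swapped for a plain Conv2D, so the batch norm and leaky activation are omitted; dark_tiny_like is a hypothetical helper for illustration):

    import tensorflow as tf

    def dark_tiny_like(filters, strides):
      # Max-pool downsample, then a 3x3 'same' convolution, as in DarkTiny.
      return tf.keras.Sequential([
          tf.keras.layers.MaxPool2D(pool_size=2, strides=strides, padding="same"),
          tf.keras.layers.Conv2D(filters, (3, 3), strides=(1, 1), padding="same"),
      ])

    x = tf.random.normal((1, 224, 224, 64))
    print(dark_tiny_like(128, strides=2)(x).shape)  # (1, 112, 112, 128)
    print(dark_tiny_like(128, strides=1)(x).shape)  # (1, 224, 224, 128)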
official/vision/beta/projects/yolo/modeling/building_blocks/__init__.py
View file @ e43d75f3
...
@@ -4,4 +4,3 @@ from ._DarkTiny import DarkTiny
from ._CSPConnect import CSPConnect
from ._CSPDownSample import CSPDownSample
from ._CSPTiny import CSPTiny
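These re-exports let callers import the blocks from the package root rather than the underscore-prefixed private modules, which is how the test files below do it:

    # Package-root imports, as used by the tests in this commit.
    from official.vision.beta.projects.yolo.modeling.building_blocks import (
        CSPConnect, CSPDownSample, CSPTiny, DarkConv, DarkResidual, DarkTiny)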
official/vision/beta/projects/yolo/modeling/tests/test_CSPConnect.py
View file @ e43d75f3
...
@@ -8,48 +8,49 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConne
class CSPConnect(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, filter_reduce=mod)
    test_layer2 = layer_companion(filters=filters, filter_reduce=mod)
    outx, px = test_layer(x)
    outx = test_layer2([outx, px])
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width // 2), np.ceil(height // 2), (filters)])
    return

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 128, 2))
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, filter_reduce=mod)
    path_layer = layer_companion(filters, filter_reduce=mod)

    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, width, height, filters),
                                       dtype=tf.float32))
    y = tf.Variable(initial_value=init(shape=(1, int(np.ceil(width // 2)),
                                              int(np.ceil(height // 2)),
                                              filters),
                                       dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat, x_prev = test_layer(x)
      x_hat = path_layer([x_hat, x_prev])
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
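The gradient test above follows a pattern every test file in this commit repeats: run the forward pass under a tf.GradientTape, differentiate the loss with respect to the layer's trainable variables, and assert no gradient came back None (a None would mean some weight is disconnected from the loss). A repo-free sketch of the same pattern, with a stock Conv2D standing in for the project layers:

    import tensorflow as tf

    # Any stand-in layer works; the point is the tape -> gradient -> None check.
    layer = tf.keras.layers.Conv2D(8, (3, 3), padding="same")
    x = tf.random.normal((1, 32, 32, 3))
    y = tf.random.normal((1, 32, 32, 8))

    with tf.GradientTape() as tape:
      loss = tf.keras.losses.MeanSquaredError()(y, layer(x))
    grads = tape.gradient(loss, layer.trainable_variables)

    assert all(g is not None for g in grads)  # the assertNotIn(None, grad) check
    tf.keras.optimizers.SGD().apply_gradients(zip(grads, layer.trainable_variables))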
official/vision/beta/projects/yolo/modeling/tests/test_CSPDownSample.py
View file @ e43d75f3
...
@@ -6,48 +6,49 @@ from absl.testing import parameterized
from official.vision.beta.projects.yolo.modeling.building_blocks import CSPDownSample as layer
from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConnect as layer_companion


class CSPDownSample(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 64, 2))
  def test_pass_through(self, width, height, filters, mod):
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, filter_reduce=mod)
    outx, px = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width // 2), np.ceil(height // 2), (filters / mod)])
    return

  @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                  ("downsample", 224, 224, 128, 2))
  def test_gradient_pass_though(self, filters, width, height, mod):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, filter_reduce=mod)
    path_layer = layer_companion(filters, filter_reduce=mod)

    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, width, height, filters),
                                       dtype=tf.float32))
    y = tf.Variable(initial_value=init(shape=(1, int(np.ceil(width // 2)),
                                              int(np.ceil(height // 2)),
                                              filters),
                                       dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat, x_prev = test_layer(x)
      x_hat = path_layer([x_hat, x_prev])
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
official/vision/beta/projects/yolo/modeling/tests/test_DarkConv.py
View file @ e43d75f3
...
@@ -5,67 +5,68 @@ from absl.testing import parameterized
from official.vision.beta.projects.yolo.modeling.building_blocks import DarkConv


class DarkConvTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("valid", (3, 3), "valid", (1, 1)),
                                  ("same", (3, 3), "same", (1, 1)),
                                  ("downsample", (3, 3), "same", (2, 2)),
                                  ("test", (1, 1), "valid", (1, 1)))
  def test_pass_through(self, kernel_size, padding, strides):
    if padding == "same":
      pad_const = 1
    else:
      pad_const = 0
    x = ks.Input(shape=(224, 224, 3))
    test_layer = DarkConv(filters=64,
                          kernel_size=kernel_size,
                          padding=padding,
                          strides=strides,
                          trainable=False)
    outx = test_layer(x)
    print(outx.shape.as_list())
    test = [
        None,
        int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),
        int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1),
        64
    ]
    print(test)
    self.assertAllEqual(outx.shape.as_list(), test)
    return

  @parameterized.named_parameters(("filters", 3))
  def test_gradient_pass_though(self, filters):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    with tf.device("/CPU:0"):
      test_layer = DarkConv(filters, kernel_size=(3, 3), padding="same")

    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, 224, 224, 3),
                                       dtype=tf.float32))
    y = tf.Variable(initial_value=init(shape=(1, 224, 224, filters),
                                       dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return

  # @parameterized.named_parameters(("filters", 3), ("filters", 20), ("filters", 512))
  # def test_time(self, filters):
  #   # finish the test for time
  #   dataset = tfds.load("mnist")
  #   model = ks.Sequential([
  #       DarkConv(7, kernel_size=(3,3), strides=(2,2), activation='relu'),
  #       DarkConv(10, kernel_size=(3,3), strides=(2,2), activation='relu'),
  #       DarkConv(filters, kernel_size=(3,3), strides=(1,1), activation='relu'),
  #       DarkConv(9, kernel_size=(3,3), strides=(2,2), activation='relu'),
  #       ks.layers.GlobalAveragePooling2D(),
  #       ks.layers.Dense(10, activation='softmax')], name='test')
  #   return


if __name__ == "__main__":
  tf.test.main()
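The expected shape in test_pass_through is standard convolution arithmetic, out = (in - k + 2p) / s + 1, with pad_const standing in for the single pixel of implicit padding a 3x3 'same' kernel receives. Worked through for the "downsample" case (k = 3, 'same' so pad_const = 1, s = 2):

    # int((224 - 3 + 2 * 1) / 2 + 1) = int(112.5) = 112: the usual 224 -> 112 halving.
    k, pad_const, s = 3, 1, 2
    print(int((224 - k + 2 * pad_const) / s + 1))  # 112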
official/vision/beta/projects/yolo/modeling/tests/test_DarkResidual.py
View file @ e43d75f3
...
@@ -7,54 +7,55 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import DarkResi
class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("same", 224, 224, 64, False),
                                  ("downsample", 223, 223, 32, True),
                                  ("oddball", 223, 223, 32, False))
  def test_pass_through(self, width, height, filters, downsample):
    mod = 1
    if downsample:
      mod = 2
    x = ks.Input(shape=(width, height, filters))
    test_layer = layer(filters=filters, downsample=downsample)
    outx = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertAllEqual(
        outx.shape.as_list(),
        [None, np.ceil(width / mod), np.ceil(height / mod), filters])
    return

  @parameterized.named_parameters(("same", 64, 224, 224, False),
                                  ("downsample", 32, 223, 223, True),
                                  ("oddball", 32, 223, 223, False))
  def test_gradient_pass_though(self, filters, width, height, downsample):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = layer(filters, downsample=downsample)

    if downsample:
      mod = 2
    else:
      mod = 1

    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, width, height, filters),
                                       dtype=tf.float32))
    y = tf.Variable(initial_value=init(shape=(1, int(np.ceil(width / mod)),
                                              int(np.ceil(height / mod)),
                                              filters),
                                       dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()
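The 223-sized cases pin down the rounding behavior: with 'same' padding and stride 2, an odd 223-wide input maps to ceil(223 / 2) = 112, which is why the expected shapes use np.ceil(width / mod) rather than exact division:

    import numpy as np

    # 'same' padding with stride 2 rounds up, so odd inputs like 223 -> 112.
    print(int(np.ceil(223 / 2)))  # 112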
official/vision/beta/projects/yolo/modeling/tests/test_DarkTiny.py
View file @ e43d75f3
...
@@ -7,44 +7,44 @@ from official.vision.beta.projects.yolo.modeling.building_blocks import DarkTiny
class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(("middle", 224, 224, 64, 2),
                                  ("last", 224, 224, 1024, 1))
  def test_pass_through(self, width, height, filters, strides):
    x = ks.Input(shape=(width, height, filters))
    test_layer = DarkTiny(filters=filters, strides=strides)
    outx = test_layer(x)
    print(outx)
    print(outx.shape.as_list())
    self.assertEqual(width % strides, 0, msg="width % strides != 0")
    self.assertEqual(height % strides, 0, msg="height % strides != 0")
    self.assertAllEqual(outx.shape.as_list(),
                        [None, width // strides, height // strides, filters])
    return

  @parameterized.named_parameters(("middle", 224, 224, 64, 2),
                                  ("last", 224, 224, 1024, 1))
  def test_gradient_pass_though(self, width, height, filters, strides):
    loss = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    test_layer = DarkTiny(filters=filters, strides=strides)

    init = tf.random_normal_initializer()
    x = tf.Variable(initial_value=init(shape=(1, width, height, filters),
                                       dtype=tf.float32))
    y = tf.Variable(initial_value=init(shape=(1, width // strides,
                                              height // strides, filters),
                                       dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return


if __name__ == "__main__":
  tf.test.main()