Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
2b676a9b
Commit
2b676a9b
authored
Jun 16, 2021
by
Gunho Park
Browse files
Merge remote-tracking branch 'upstream/master'
parents
6ddd627a
bcbce005
Changes
28
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
8 changed files
with
1696 additions
and
442 deletions
+1696
-442
official/vision/beta/projects/yolo/modeling/decoders/yolo_decoder_test.py
...beta/projects/yolo/modeling/decoders/yolo_decoder_test.py
+153
-0
official/vision/beta/projects/yolo/modeling/heads/__init__.py
...cial/vision/beta/projects/yolo/modeling/heads/__init__.py
+14
-0
official/vision/beta/projects/yolo/modeling/heads/yolo_head.py
...ial/vision/beta/projects/yolo/modeling/heads/yolo_head.py
+122
-0
official/vision/beta/projects/yolo/modeling/heads/yolo_head_test.py
...ision/beta/projects/yolo/modeling/heads/yolo_head_test.py
+74
-0
official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py
...al/vision/beta/projects/yolo/modeling/layers/nn_blocks.py
+1179
-364
official/vision/beta/projects/yolo/modeling/layers/nn_blocks_test.py
...sion/beta/projects/yolo/modeling/layers/nn_blocks_test.py
+149
-48
research/object_detection/core/anchor_generator.py
research/object_detection/core/anchor_generator.py
+3
-29
research/object_detection/models/keras_models/resnet_v1.py
research/object_detection/models/keras_models/resnet_v1.py
+2
-1
No files found.
official/vision/beta/projects/yolo/modeling/decoders/yolo_decoder_test.py
0 → 100644
View file @
2b676a9b
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for YOLO."""
# Import libraries
from
absl.testing
import
parameterized
import
tensorflow
as
tf
from
tensorflow.python.distribute
import
combinations
from
tensorflow.python.distribute
import
strategy_combinations
from
official.vision.beta.projects.yolo.modeling.decoders
import
yolo_decoder
as
decoders
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for the YOLO decoder."""

  def _build_yolo_decoder(self, input_specs, name='1'):
    """Builds one of 4 arbitrary decoder configurations selected by `name`.

    Args:
      input_specs: dict of `{level: shape}` input specifications for the
        decoder.
      name: `str`, one of '1', '6spp', '6sppfpn', '6' selecting the decoder
        configuration to build.

    Returns:
      A `decoders.YoloDecoder` instance.

    Raises:
      NotImplementedError: if `name` is not a known configuration.
    """
    # Builds 4 different arbitrary decoders.
    if name == '1':
      model = decoders.YoloDecoder(
          input_specs=input_specs,
          embed_spp=False,
          use_fpn=False,
          max_level_process_len=2,
          path_process_len=1,
          activation='mish')
    elif name == '6spp':
      model = decoders.YoloDecoder(
          input_specs=input_specs,
          embed_spp=True,
          use_fpn=False,
          max_level_process_len=None,
          path_process_len=6,
          activation='mish')
    elif name == '6sppfpn':
      model = decoders.YoloDecoder(
          input_specs=input_specs,
          embed_spp=True,
          use_fpn=True,
          max_level_process_len=None,
          path_process_len=6,
          activation='mish')
    elif name == '6':
      model = decoders.YoloDecoder(
          input_specs=input_specs,
          embed_spp=False,
          use_fpn=False,
          max_level_process_len=None,
          path_process_len=6,
          activation='mish')
    else:
      # BUG FIX: the original message interpolated the *builtin* `type`
      # (rendering "<class 'type'>") instead of the unknown `name` argument.
      raise NotImplementedError(f'YOLO decoder test {name} not implemented.')
    return model

  @parameterized.parameters('1', '6spp', '6sppfpn', '6')
  def test_network_creation(self, version):
    """Test creation of the YOLO decoder variants."""
    # (Docstring fixed: the original said "ResNet family models", a
    # copy-paste from another test file.)
    tf.keras.backend.set_image_data_format('channels_last')
    input_shape = {
        '3': [1, 52, 52, 256],
        '4': [1, 26, 26, 512],
        '5': [1, 13, 13, 1024]
    }
    decoder = self._build_yolo_decoder(input_shape, version)

    inputs = {}
    for key in input_shape:
      inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)

    endpoints = decoder.call(inputs)

    # The decoder must preserve the spatial shape and channel count of every
    # input level.
    for key in endpoints.keys():
      self.assertAllEqual(endpoints[key].shape.as_list(), input_shape[key])

  @combinations.generate(
      combinations.combine(
          strategy=[
              strategy_combinations.cloud_tpu_strategy,
              strategy_combinations.one_device_strategy_gpu,
          ],
          use_sync_bn=[False, True],
      ))
  def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
    """Test for sync bn on TPU and GPU devices."""
    # NOTE(review): `use_sync_bn` is not consumed by the body; the decoder is
    # built with its defaults under `strategy.scope()` — confirm intent.
    tf.keras.backend.set_image_data_format('channels_last')

    with strategy.scope():
      input_shape = {
          '3': [1, 52, 52, 256],
          '4': [1, 26, 26, 512],
          '5': [1, 13, 13, 1024]
      }
      decoder = self._build_yolo_decoder(input_shape, '6')

      inputs = {}
      for key in input_shape:
        inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)

      _ = decoder.call(inputs)

  @parameterized.parameters(1, 3, 4)
  def test_input_specs(self, input_dim):
    """Test different input feature dimensions."""
    # NOTE(review): `input_dim` is not consumed by the body; the shapes below
    # are fixed — confirm intent.
    tf.keras.backend.set_image_data_format('channels_last')
    input_shape = {
        '3': [1, 52, 52, 256],
        '4': [1, 26, 26, 512],
        '5': [1, 13, 13, 1024]
    }
    decoder = self._build_yolo_decoder(input_shape, '6')

    inputs = {}
    for key in input_shape:
      inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)

    _ = decoder(inputs)

  def test_serialize_deserialize(self):
    """Create a network object that sets all of its config options."""
    tf.keras.backend.set_image_data_format('channels_last')
    input_shape = {
        '3': [1, 52, 52, 256],
        '4': [1, 26, 26, 512],
        '5': [1, 13, 13, 1024]
    }
    decoder = self._build_yolo_decoder(input_shape, '6')

    inputs = {}
    for key in input_shape:
      inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)

    _ = decoder(inputs)

    # Round-tripping through get_config/from_config must be lossless.
    config = decoder.get_config()
    decoder_from_config = decoders.YoloDecoder.from_config(config)
    self.assertAllEqual(decoder.get_config(), decoder_from_config.get_config())
# Allow running this test file directly, e.g. `python yolo_decoder_test.py`.
if __name__ == '__main__':
  tf.test.main()
official/vision/beta/projects/yolo/modeling/heads/__init__.py
0 → 100644
View file @
2b676a9b
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
official/vision/beta/projects/yolo/modeling/heads/yolo_head.py
0 → 100644
View file @
2b676a9b
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Yolo heads."""
import
tensorflow
as
tf
from
official.vision.beta.projects.yolo.modeling.layers
import
nn_blocks
class YoloHead(tf.keras.layers.Layer):
  """YOLO Prediction Head."""

  def __init__(self,
               min_level,
               max_level,
               classes=80,
               boxes_per_level=3,
               output_extras=0,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               kernel_initializer='glorot_uniform',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation=None,
               **kwargs):
    """Yolo Prediction Head initialization function.

    Args:
      min_level: `int`, the minimum backbone output level.
      max_level: `int`, the maximum backbone output level.
      classes: `int`, number of classes per category.
      boxes_per_level: `int`, number of boxes to predict per level.
      output_extras: `int`, number of additional output channels that the head
        should predict for non-object detection and non-image classification
        tasks.
      norm_momentum: `float`, normalization momentum for the moving average.
      norm_epsilon: `float`, small float added to variance to avoid dividing
        by zero.
      kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
      bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
      activation: `str`, the activation function to use typically leaky or
        mish.
      **kwargs: keyword arguments to be passed.
    """
    super().__init__(**kwargs)

    self._min_level = min_level
    self._max_level = max_level
    # One string key per pyramid level, e.g. ['3', '4', '5'].
    self._key_list = [
        str(level) for level in range(min_level, max_level + 1)
    ]

    self._classes = classes
    self._boxes_per_level = boxes_per_level
    self._output_extras = output_extras

    # Output channels of the prediction conv: (classes + extras + 5) values
    # per anchor box, for `boxes_per_level` boxes.
    self._output_conv = (classes + output_extras + 5) * boxes_per_level

    # Keyword arguments shared by every ConvBN block built by this head.
    self._base_config = {
        'activation': activation,
        'norm_momentum': norm_momentum,
        'norm_epsilon': norm_epsilon,
        'kernel_initializer': kernel_initializer,
        'kernel_regularizer': kernel_regularizer,
        'bias_regularizer': bias_regularizer,
    }

    # Full configuration of the per-level 1x1 prediction convolution.
    self._conv_config = {
        'filters': self._output_conv,
        'kernel_size': (1, 1),
        'strides': (1, 1),
        'padding': 'same',
        'use_bn': False,
        **self._base_config,
    }

  def build(self, input_shape):
    # One independent 1x1 ConvBN predictor per pyramid level.
    self._head = {
        level: nn_blocks.ConvBN(**self._conv_config)
        for level in self._key_list
    }

  def call(self, inputs):
    # Apply each level's predictor to the matching input feature map.
    return {
        level: self._head[level](inputs[level])
        for level in self._key_list
    }

  @property
  def output_depth(self):
    # Number of output channels produced per level.
    return (self._classes + self._output_extras + 5) * self._boxes_per_level

  @property
  def num_boxes(self):
    # Total number of boxes predicted across all levels.
    if self._min_level is None or self._max_level is None:
      raise Exception(
          'Model has to be built before number of boxes can be determined.')
    num_levels = self._max_level - self._min_level + 1
    return num_levels * self._boxes_per_level

  def get_config(self):
    return dict(
        min_level=self._min_level,
        max_level=self._max_level,
        classes=self._classes,
        boxes_per_level=self._boxes_per_level,
        output_extras=self._output_extras,
        **self._base_config)

  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)
official/vision/beta/projects/yolo/modeling/heads/yolo_head_test.py
0 → 100644
View file @
2b676a9b
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for yolo heads."""
# Import libraries
from
absl.testing
import
parameterized
import
tensorflow
as
tf
from
official.vision.beta.projects.yolo.modeling.heads
import
yolo_head
as
heads
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for yolo heads."""

  # NOTE(review): the class name is a copy-paste from the decoder test file;
  # it actually exercises `heads.YoloHead`. Left unrenamed to keep the public
  # identifier stable; consider renaming to YoloHeadTest in a follow-up.

  def test_network_creation(self):
    """Test creation of YOLO head models."""
    tf.keras.backend.set_image_data_format('channels_last')
    input_shape = {
        '3': [1, 52, 52, 256],
        '4': [1, 26, 26, 512],
        '5': [1, 13, 13, 1024]
    }
    classes = 100
    bps = 3
    head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)

    inputs = {}
    for key in input_shape:
      inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)

    endpoints = head(inputs)

    for key in endpoints.keys():
      # BUG FIX: copy the shape before mutating it. The original assigned the
      # list by reference and then wrote `expected_input_shape[-1] = ...`,
      # silently mutating `input_shape` itself through the alias.
      expected_input_shape = list(input_shape[key])
      # Head replaces the channel dim with (classes + 5) values per box.
      expected_input_shape[-1] = (classes + 5) * bps
      self.assertAllEqual(endpoints[key].shape.as_list(),
                          expected_input_shape)

  def test_serialize_deserialize(self):
    """Create a network object that sets all of its config options."""
    tf.keras.backend.set_image_data_format('channels_last')
    input_shape = {
        '3': [1, 52, 52, 256],
        '4': [1, 26, 26, 512],
        '5': [1, 13, 13, 1024]
    }
    classes = 100
    bps = 3
    head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)

    inputs = {}
    for key in input_shape:
      inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)

    _ = head(inputs)

    # Round-tripping through get_config/from_config must be lossless.
    configs = head.get_config()
    head_from_config = heads.YoloHead.from_config(configs)
    self.assertAllEqual(head.get_config(), head_from_config.get_config())
# Allow running this test file directly, e.g. `python yolo_head_test.py`.
if __name__ == '__main__':
  tf.test.main()
official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py
View file @
2b676a9b
This diff is collapsed.
Click to expand it.
official/vision/beta/projects/yolo/modeling/layers/nn_blocks_test.py
View file @
2b676a9b
...
...
@@ -13,7 +13,6 @@
# limitations under the License.
# Lint as: python3
from
absl.testing
import
parameterized
import
numpy
as
np
import
tensorflow
as
tf
...
...
@@ -23,8 +22,8 @@ from official.vision.beta.projects.yolo.modeling.layers import nn_blocks
class
CSPConnectTest
(
tf
.
test
.
TestCase
,
parameterized
.
TestCase
):
@
parameterized
.
named_parameters
((
"
same
"
,
224
,
224
,
64
,
1
),
(
"
downsample
"
,
224
,
224
,
64
,
2
))
@
parameterized
.
named_parameters
((
'
same
'
,
224
,
224
,
64
,
1
),
(
'
downsample
'
,
224
,
224
,
64
,
2
))
def
test_pass_through
(
self
,
width
,
height
,
filters
,
mod
):
x
=
tf
.
keras
.
Input
(
shape
=
(
width
,
height
,
filters
))
test_layer
=
nn_blocks
.
CSPRoute
(
filters
=
filters
,
filter_scale
=
mod
)
...
...
@@ -38,8 +37,8 @@ class CSPConnectTest(tf.test.TestCase, parameterized.TestCase):
[
None
,
np
.
ceil
(
width
//
2
),
np
.
ceil
(
height
//
2
),
(
filters
)])
@
parameterized
.
named_parameters
((
"
same
"
,
224
,
224
,
64
,
1
),
(
"
downsample
"
,
224
,
224
,
128
,
2
))
@
parameterized
.
named_parameters
((
'
same
'
,
224
,
224
,
64
,
1
),
(
'
downsample
'
,
224
,
224
,
128
,
2
))
def
test_gradient_pass_though
(
self
,
filters
,
width
,
height
,
mod
):
loss
=
tf
.
keras
.
losses
.
MeanSquaredError
()
optimizer
=
tf
.
keras
.
optimizers
.
SGD
()
...
...
@@ -49,10 +48,11 @@ class CSPConnectTest(tf.test.TestCase, parameterized.TestCase):
init
=
tf
.
random_normal_initializer
()
x
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
width
,
height
,
filters
),
dtype
=
tf
.
float32
))
y
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
int
(
np
.
ceil
(
width
//
2
)),
int
(
np
.
ceil
(
height
//
2
)),
filters
),
dtype
=
tf
.
float32
))
y
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
int
(
np
.
ceil
(
width
//
2
)),
int
(
np
.
ceil
(
height
//
2
)),
filters
),
dtype
=
tf
.
float32
))
with
tf
.
GradientTape
()
as
tape
:
x_hat
,
x_prev
=
test_layer
(
x
)
...
...
@@ -66,8 +66,8 @@ class CSPConnectTest(tf.test.TestCase, parameterized.TestCase):
class
CSPRouteTest
(
tf
.
test
.
TestCase
,
parameterized
.
TestCase
):
@
parameterized
.
named_parameters
((
"
same
"
,
224
,
224
,
64
,
1
),
(
"
downsample
"
,
224
,
224
,
64
,
2
))
@
parameterized
.
named_parameters
((
'
same
'
,
224
,
224
,
64
,
1
),
(
'
downsample
'
,
224
,
224
,
64
,
2
))
def
test_pass_through
(
self
,
width
,
height
,
filters
,
mod
):
x
=
tf
.
keras
.
Input
(
shape
=
(
width
,
height
,
filters
))
test_layer
=
nn_blocks
.
CSPRoute
(
filters
=
filters
,
filter_scale
=
mod
)
...
...
@@ -79,8 +79,8 @@ class CSPRouteTest(tf.test.TestCase, parameterized.TestCase):
[
None
,
np
.
ceil
(
width
//
2
),
np
.
ceil
(
height
//
2
),
(
filters
/
mod
)])
@
parameterized
.
named_parameters
((
"
same
"
,
224
,
224
,
64
,
1
),
(
"
downsample
"
,
224
,
224
,
128
,
2
))
@
parameterized
.
named_parameters
((
'
same
'
,
224
,
224
,
64
,
1
),
(
'
downsample
'
,
224
,
224
,
128
,
2
))
def
test_gradient_pass_though
(
self
,
filters
,
width
,
height
,
mod
):
loss
=
tf
.
keras
.
losses
.
MeanSquaredError
()
optimizer
=
tf
.
keras
.
optimizers
.
SGD
()
...
...
@@ -90,10 +90,11 @@ class CSPRouteTest(tf.test.TestCase, parameterized.TestCase):
init
=
tf
.
random_normal_initializer
()
x
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
width
,
height
,
filters
),
dtype
=
tf
.
float32
))
y
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
int
(
np
.
ceil
(
width
//
2
)),
int
(
np
.
ceil
(
height
//
2
)),
filters
),
dtype
=
tf
.
float32
))
y
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
int
(
np
.
ceil
(
width
//
2
)),
int
(
np
.
ceil
(
height
//
2
)),
filters
),
dtype
=
tf
.
float32
))
with
tf
.
GradientTape
()
as
tape
:
x_hat
,
x_prev
=
test_layer
(
x
)
...
...
@@ -107,11 +108,11 @@ class CSPRouteTest(tf.test.TestCase, parameterized.TestCase):
class
CSPStackTest
(
tf
.
test
.
TestCase
,
parameterized
.
TestCase
):
def
build_layer
(
self
,
layer_type
,
filters
,
filter_scale
,
count
,
stack_type
,
downsample
):
def
build_layer
(
self
,
layer_type
,
filters
,
filter_scale
,
count
,
stack_type
,
downsample
):
if
stack_type
is
not
None
:
layers
=
[]
if
layer_type
==
"
residual
"
:
if
layer_type
==
'
residual
'
:
for
_
in
range
(
count
):
layers
.
append
(
nn_blocks
.
DarkResidual
(
...
...
@@ -120,7 +121,7 @@ class CSPStackTest(tf.test.TestCase, parameterized.TestCase):
for
_
in
range
(
count
):
layers
.
append
(
nn_blocks
.
ConvBN
(
filters
=
filters
))
if
stack_type
==
"
model
"
:
if
stack_type
==
'
model
'
:
layers
=
tf
.
keras
.
Sequential
(
layers
=
layers
)
else
:
layers
=
None
...
...
@@ -133,10 +134,10 @@ class CSPStackTest(tf.test.TestCase, parameterized.TestCase):
return
stack
@
parameterized
.
named_parameters
(
(
"
no_stack
"
,
224
,
224
,
64
,
2
,
"
residual
"
,
None
,
0
,
True
),
(
"
residual_stack
"
,
224
,
224
,
64
,
2
,
"
residual
"
,
"
list
"
,
2
,
True
),
(
"
conv_stack
"
,
224
,
224
,
64
,
2
,
"
conv
"
,
"
list
"
,
3
,
False
),
(
"
callable_no_scale
"
,
224
,
224
,
64
,
1
,
"
residual
"
,
"
model
"
,
5
,
False
))
(
'
no_stack
'
,
224
,
224
,
64
,
2
,
'
residual
'
,
None
,
0
,
True
),
(
'
residual_stack
'
,
224
,
224
,
64
,
2
,
'
residual
'
,
'
list
'
,
2
,
True
),
(
'
conv_stack
'
,
224
,
224
,
64
,
2
,
'
conv
'
,
'
list
'
,
3
,
False
),
(
'
callable_no_scale
'
,
224
,
224
,
64
,
1
,
'
residual
'
,
'
model
'
,
5
,
False
))
def
test_pass_through
(
self
,
width
,
height
,
filters
,
mod
,
layer_type
,
stack_type
,
count
,
downsample
):
x
=
tf
.
keras
.
Input
(
shape
=
(
width
,
height
,
filters
))
...
...
@@ -152,10 +153,10 @@ class CSPStackTest(tf.test.TestCase, parameterized.TestCase):
self
.
assertAllEqual
(
outx
.
shape
.
as_list
(),
[
None
,
width
,
height
,
filters
])
@
parameterized
.
named_parameters
(
(
"
no_stack
"
,
224
,
224
,
64
,
2
,
"
residual
"
,
None
,
0
,
True
),
(
"
residual_stack
"
,
224
,
224
,
64
,
2
,
"
residual
"
,
"
list
"
,
2
,
True
),
(
"
conv_stack
"
,
224
,
224
,
64
,
2
,
"
conv
"
,
"
list
"
,
3
,
False
),
(
"
callable_no_scale
"
,
224
,
224
,
64
,
1
,
"
residual
"
,
"
model
"
,
5
,
False
))
(
'
no_stack
'
,
224
,
224
,
64
,
2
,
'
residual
'
,
None
,
0
,
True
),
(
'
residual_stack
'
,
224
,
224
,
64
,
2
,
'
residual
'
,
'
list
'
,
2
,
True
),
(
'
conv_stack
'
,
224
,
224
,
64
,
2
,
'
conv
'
,
'
list
'
,
3
,
False
),
(
'
callable_no_scale
'
,
224
,
224
,
64
,
1
,
'
residual
'
,
'
model
'
,
5
,
False
))
def
test_gradient_pass_though
(
self
,
width
,
height
,
filters
,
mod
,
layer_type
,
stack_type
,
count
,
downsample
):
loss
=
tf
.
keras
.
losses
.
MeanSquaredError
()
...
...
@@ -188,10 +189,10 @@ class CSPStackTest(tf.test.TestCase, parameterized.TestCase):
class
ConvBNTest
(
tf
.
test
.
TestCase
,
parameterized
.
TestCase
):
@
parameterized
.
named_parameters
(
(
"
valid
"
,
(
3
,
3
),
"
valid
"
,
(
1
,
1
)),
(
"
same
"
,
(
3
,
3
),
"
same
"
,
(
1
,
1
)),
(
"
downsample
"
,
(
3
,
3
),
"
same
"
,
(
2
,
2
)),
(
"
test
"
,
(
1
,
1
),
"
valid
"
,
(
1
,
1
)))
(
'
valid
'
,
(
3
,
3
),
'
valid
'
,
(
1
,
1
)),
(
'
same
'
,
(
3
,
3
),
'
same
'
,
(
1
,
1
)),
(
'
downsample
'
,
(
3
,
3
),
'
same
'
,
(
2
,
2
)),
(
'
test
'
,
(
1
,
1
),
'
valid
'
,
(
1
,
1
)))
def
test_pass_through
(
self
,
kernel_size
,
padding
,
strides
):
if
padding
==
"
same
"
:
if
padding
==
'
same
'
:
pad_const
=
1
else
:
pad_const
=
0
...
...
@@ -212,16 +213,16 @@ class ConvBNTest(tf.test.TestCase, parameterized.TestCase):
print
(
test
)
self
.
assertAllEqual
(
outx
.
shape
.
as_list
(),
test
)
@
parameterized
.
named_parameters
((
"
filters
"
,
3
))
@
parameterized
.
named_parameters
((
'
filters
'
,
3
))
def
test_gradient_pass_though
(
self
,
filters
):
loss
=
tf
.
keras
.
losses
.
MeanSquaredError
()
optimizer
=
tf
.
keras
.
optimizers
.
SGD
()
with
tf
.
device
(
"
/CPU:0
"
):
test_layer
=
nn_blocks
.
ConvBN
(
filters
,
kernel_size
=
(
3
,
3
),
padding
=
"
same
"
)
with
tf
.
device
(
'
/CPU:0
'
):
test_layer
=
nn_blocks
.
ConvBN
(
filters
,
kernel_size
=
(
3
,
3
),
padding
=
'
same
'
)
init
=
tf
.
random_normal_initializer
()
x
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
224
,
224
,
3
),
dtype
=
tf
.
float32
))
x
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
224
,
224
,
3
),
dtype
=
tf
.
float32
))
y
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
224
,
224
,
filters
),
dtype
=
tf
.
float32
))
...
...
@@ -235,9 +236,9 @@ class ConvBNTest(tf.test.TestCase, parameterized.TestCase):
class
DarkResidualTest
(
tf
.
test
.
TestCase
,
parameterized
.
TestCase
):
@
parameterized
.
named_parameters
((
"
same
"
,
224
,
224
,
64
,
False
),
(
"
downsample
"
,
223
,
223
,
32
,
True
),
(
"
oddball
"
,
223
,
223
,
32
,
False
))
@
parameterized
.
named_parameters
((
'
same
'
,
224
,
224
,
64
,
False
),
(
'
downsample
'
,
223
,
223
,
32
,
True
),
(
'
oddball
'
,
223
,
223
,
32
,
False
))
def
test_pass_through
(
self
,
width
,
height
,
filters
,
downsample
):
mod
=
1
if
downsample
:
...
...
@@ -252,9 +253,9 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
[
None
,
np
.
ceil
(
width
/
mod
),
np
.
ceil
(
height
/
mod
),
filters
])
@
parameterized
.
named_parameters
((
"
same
"
,
64
,
224
,
224
,
False
),
(
"
downsample
"
,
32
,
223
,
223
,
True
),
(
"
oddball
"
,
32
,
223
,
223
,
False
))
@
parameterized
.
named_parameters
((
'
same
'
,
64
,
224
,
224
,
False
),
(
'
downsample
'
,
32
,
223
,
223
,
True
),
(
'
oddball
'
,
32
,
223
,
223
,
False
))
def
test_gradient_pass_though
(
self
,
filters
,
width
,
height
,
downsample
):
loss
=
tf
.
keras
.
losses
.
MeanSquaredError
()
optimizer
=
tf
.
keras
.
optimizers
.
SGD
()
...
...
@@ -268,10 +269,11 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
init
=
tf
.
random_normal_initializer
()
x
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
width
,
height
,
filters
),
dtype
=
tf
.
float32
))
y
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
int
(
np
.
ceil
(
width
/
mod
)),
int
(
np
.
ceil
(
height
/
mod
)),
filters
),
dtype
=
tf
.
float32
))
y
=
tf
.
Variable
(
initial_value
=
init
(
shape
=
(
1
,
int
(
np
.
ceil
(
width
/
mod
)),
int
(
np
.
ceil
(
height
/
mod
)),
filters
),
dtype
=
tf
.
float32
))
with
tf
.
GradientTape
()
as
tape
:
x_hat
=
test_layer
(
x
)
...
...
@@ -281,5 +283,104 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
self
.
assertNotIn
(
None
,
grad
)
if
__name__
==
"__main__"
:
class DarkSppTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the nn_blocks.SPP (spatial pyramid pooling) layer."""

  @parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),
                                  ('test1', 300, 300, 10, [2, 3, 4, 5]),
                                  ('test2', 256, 256, 5, [10]))
  def test_pass_through(self, width, height, channels, sizes):
    """SPP concatenates one pooled copy per size plus the input itself."""
    x = tf.keras.Input(shape=(width, height, channels))
    test_layer = nn_blocks.SPP(sizes=sizes)
    outx = test_layer(x)
    # Spatial dims are preserved; channels multiply by (len(sizes) + 1).
    self.assertAllEqual(outx.shape.as_list(),
                        [None, width, height, channels * (len(sizes) + 1)])
    # IDIOM FIX: removed the useless bare `return` at the end of the method.

  @parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),
                                  ('test1', 300, 300, 10, [2, 3, 4, 5]),
                                  ('test2', 256, 256, 5, [10]))
  def test_gradient_pass_though(self, width, height, channels, sizes):
    """A training step through SPP must yield a gradient for every variable."""
    loss = tf.keras.losses.MeanSquaredError()
    optimizer = tf.keras.optimizers.SGD()
    test_layer = nn_blocks.SPP(sizes=sizes)

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, channels),
                           dtype=tf.float32))
    y = tf.Variable(
        initial_value=init(
            shape=(1, width, height, channels * (len(sizes) + 1)),
            dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat = test_layer(x)
      grad_loss = loss(x_hat, y)
    grad = tape.gradient(grad_loss, test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    # A `None` entry would mean some variable was disconnected from the loss.
    self.assertNotIn(None, grad)
    # IDIOM FIX: removed the useless bare `return` at the end of the method.
class DarkRouteProcessTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the nn_blocks.DarkRouteProcess layer."""

  # NOTE(review): the 'spp' case passes `False` for the `spp` argument,
  # making it identical to 'test1'; presumably it was meant to pass `True`
  # to exercise `insert_spp` — confirm against nn_blocks.DarkRouteProcess.
  @parameterized.named_parameters(
      ('test1', 224, 224, 64, 7, False),
      ('test2', 223, 223, 32, 3, False),
      ('tiny', 223, 223, 16, 1, False),
      ('spp', 224, 224, 64, 7, False))
  def test_pass_through(self, width, height, filters, repetitions, spp):
    # The layer returns a pair of feature maps for a symbolic input.
    x = tf.keras.Input(shape=(width, height, filters))
    test_layer = nn_blocks.DarkRouteProcess(
        filters=filters, repetitions=repetitions, insert_spp=spp)
    outx = test_layer(x)
    self.assertLen(outx, 2, msg='len(outx) != 2')
    # With a single repetition the secondary output keeps the full filter
    # count; otherwise it is halved.
    if repetitions == 1:
      filter_y1 = filters
    else:
      filter_y1 = filters // 2
    self.assertAllEqual(
        outx[1].shape.as_list(), [None, width, height, filter_y1])
    self.assertAllEqual(
        filters % 2, 0,
        msg='Output of a DarkRouteProcess layer has an odd number of filters')
    self.assertAllEqual(
        outx[0].shape.as_list(), [None, width, height, filters])

  @parameterized.named_parameters(
      ('test1', 224, 224, 64, 7, False),
      ('test2', 223, 223, 32, 3, False),
      ('tiny', 223, 223, 16, 1, False),
      ('spp', 224, 224, 64, 7, False))
  def test_gradient_pass_though(self, width, height, filters, repetitions,
                                spp):
    # A training step must produce a gradient for every trainable variable.
    loss = tf.keras.losses.MeanSquaredError()
    optimizer = tf.keras.optimizers.SGD()
    test_layer = nn_blocks.DarkRouteProcess(
        filters=filters, repetitions=repetitions, insert_spp=spp)

    # Target shape of the secondary output mirrors test_pass_through.
    if repetitions == 1:
      filter_y1 = filters
    else:
      filter_y1 = filters // 2

    init = tf.random_normal_initializer()
    x = tf.Variable(
        initial_value=init(shape=(1, width, height, filters),
                           dtype=tf.float32))
    y_0 = tf.Variable(
        initial_value=init(shape=(1, width, height, filters),
                           dtype=tf.float32))
    y_1 = tf.Variable(
        initial_value=init(shape=(1, width, height, filter_y1),
                           dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat_0, x_hat_1 = test_layer(x)
      grad_loss_0 = loss(x_hat_0, y_0)
      grad_loss_1 = loss(x_hat_1, y_1)
    grad = tape.gradient([grad_loss_0, grad_loss_1],
                         test_layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))

    self.assertNotIn(None, grad)
    return
# Allow running this test file directly, e.g. `python nn_blocks_test.py`.
if __name__ == '__main__':
  tf.test.main()
research/object_detection/core/anchor_generator.py
View file @
2b676a9b
...
...
@@ -37,7 +37,6 @@ from abc import ABCMeta
from
abc
import
abstractmethod
import
six
from
six.moves
import
zip
import
tensorflow.compat.v1
as
tf
...
...
@@ -107,11 +106,9 @@ class AnchorGenerator(six.with_metaclass(ABCMeta, object)):
with
tf
.
name_scope
(
self
.
name_scope
()):
anchors_list
=
self
.
_generate
(
feature_map_shape_list
,
**
params
)
if
self
.
check_num_anchors
:
with
tf
.
control_dependencies
([
self
.
_assert_correct_number_of_anchors
(
anchors_list
,
feature_map_shape_list
)]):
for
item
in
anchors_list
:
item
.
set
(
tf
.
identity
(
item
.
get
()))
for
item
in
anchors_list
:
item
.
set
(
tf
.
identity
(
item
.
get
()))
return
anchors_list
@
abstractmethod
...
...
@@ -146,26 +143,3 @@ class AnchorGenerator(six.with_metaclass(ABCMeta, object)):
feature_map_indices_list
.
append
(
i
*
tf
.
ones
([
boxes
.
num_boxes
()],
dtype
=
tf
.
int32
))
return
tf
.
concat
(
feature_map_indices_list
,
axis
=
0
)
def
_assert_correct_number_of_anchors
(
self
,
anchors_list
,
feature_map_shape_list
):
"""Assert that correct number of anchors was generated.
Args:
anchors_list: A list of box_list.BoxList object holding anchors generated.
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
Returns:
Op that raises InvalidArgumentError if the number of anchors does not
match the number of expected anchors.
"""
expected_num_anchors
=
0
actual_num_anchors
=
0
for
num_anchors_per_location
,
feature_map_shape
,
anchors
in
zip
(
self
.
num_anchors_per_location
(),
feature_map_shape_list
,
anchors_list
):
expected_num_anchors
+=
(
num_anchors_per_location
*
feature_map_shape
[
0
]
*
feature_map_shape
[
1
])
actual_num_anchors
+=
anchors
.
num_boxes
()
return
tf
.
assert_equal
(
expected_num_anchors
,
actual_num_anchors
)
research/object_detection/models/keras_models/resnet_v1.py
View file @
2b676a9b
...
...
@@ -19,9 +19,10 @@ from __future__ import absolute_import
from
__future__
import
division
from
__future__
import
print_function
from
keras.applications
import
resnet
import
tensorflow.compat.v1
as
tf
from
tensorflow.python.keras.applications
import
resnet
from
object_detection.core
import
freezable_batch_norm
from
object_detection.models.keras_models
import
model_utils
...
...
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment