ModelZoo / ResNet50_tensorflow / Commits

Commit 420a7253 (unverified), authored Jun 17, 2020 by pkulzc, committed by GitHub on Jun 17, 2020
Parent: d0ef3913

Refactor tests for Object Detection API. (#8688)

Internal changes -- PiperOrigin-RevId: 316837667
Showing 20 changed files with 883 additions and 459 deletions.
Files changed:

  research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py  (+49, -0)
  research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py  (+3, -0)
  research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py  (+3, -1)
  research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py  (+7, -36)
  research/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py  (+3, -1)
  research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py  (+3, -1)
  research/object_detection/models/faster_rcnn_nas_feature_extractor.py  (+8, -2)
  research/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py  (+3, -1)
  research/object_detection/models/faster_rcnn_pnas_feature_extractor.py  (+5, -1)
  research/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py  (+3, -1)
  research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py  (+271, -0)
  research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py  (+80, -0)
  research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py  (+3, -1)
  research/object_detection/models/feature_map_generators_test.py  (+287, -338)
  research/object_detection/models/keras_models/convert_keras_models.py  (+85, -0)
  research/object_detection/models/keras_models/hourglass_network_tf2_test.py  (+3, -2)
  research/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py  (+4, -3)
  research/object_detection/models/keras_models/mobilenet_v1_tf2_test.py  (+27, -34)
  research/object_detection/models/keras_models/mobilenet_v2_tf2_test.py  (+32, -34)
  research/object_detection/models/keras_models/resnet_v1_tf2_test.py  (+4, -3)
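The renamed suffixes (*_tf1_test.py vs. *_tf2_test.py) let each TensorFlow major version pick up only its own tests by filename. For example, a TF2-only run could be collected roughly as below; the start directory assumes a checkout of the models repository and is only illustrative.

import unittest

# Collect only the TF2-gated tests via the filename convention listed above.
suite = unittest.defaultTestLoader.discover(
    'research/object_detection/models', pattern='*_tf2_test.py')
unittest.TextTestRunner().run(suite)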
research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py (new file, mode 100644)

# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing ResNet v1 FPN models for the CenterNet meta architecture."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.models import center_net_resnet_v1_fpn_feature_extractor
from object_detection.utils import test_case
from object_detection.utils import tf_version


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetResnetV1FpnFeatureExtractorTest(test_case.TestCase,
                                               parameterized.TestCase):

  @parameterized.parameters(
      {'resnet_type': 'resnet_v1_50'},
      {'resnet_type': 'resnet_v1_101'},
  )
  def test_correct_output_size(self, resnet_type):
    """Verify that shape of features returned by the backbone is correct."""
    model = center_net_resnet_v1_fpn_feature_extractor.\
        CenterNetResnetV1FpnFeatureExtractor(resnet_type)

    def graph_fn():
      img = np.zeros((8, 224, 224, 3), dtype=np.float32)
      processed_img = model.preprocess(img)
      return model(processed_img)

    self.assertEqual(self.execute(graph_fn, []).shape, (8, 56, 56, 64))


if __name__ == '__main__':
  tf.test.main()
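The graph_fn / self.execute pattern used above is the convention the rest of this commit migrates tests to: the test builds its tensors inside a function and hands it to test_case.TestCase.execute, which runs it eagerly under TF2 or inside a session under TF1. The helper itself is not part of this diff, so the sketch below is only an assumption of roughly what it does, written against tensorflow.compat.v1.

import tensorflow.compat.v1 as tf

def run_graph_fn(graph_fn, graph=None):
  # Hypothetical stand-in for test_case.TestCase.execute(graph_fn, [], g).
  if tf.executing_eagerly():
    return graph_fn().numpy()          # TF2: just call the function.
  with graph.as_default():             # TF1: build into the supplied graph...
    output = graph_fn()
    init_op = tf.global_variables_initializer()
  with tf.Session(graph=graph) as sess:  # ...then run it in a session.
    sess.run(init_op)
    return sess.run(output)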
research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_test.py → research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py

@@ -14,13 +14,16 @@
 # ==============================================================================
 """Tests for embedded_ssd_mobilenet_v1_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf

 from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor
 from object_detection.models import ssd_feature_extractor_test
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class EmbeddedSSDMobileNetV1FeatureExtractorTest(
     ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_test.py → research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py

@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase):

   def _build_feature_extractor(self, first_stage_features_stride):
research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_test.py → research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py

@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
 class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):

   def _build_feature_extractor(self, first_stage_features_stride):

@@ -38,11 +40,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
         name='TestScope')(preprocessed_inputs)
     features_shape = tf.shape(rpn_feature_map)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [1, 19, 19, 1088])
+    self.assertAllEqual(features_shape.numpy(), [1, 19, 19, 1088])

   def test_extract_proposal_features_stride_eight(self):
     feature_extractor = self._build_feature_extractor(

@@ -53,11 +51,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
         name='TestScope')(preprocessed_inputs)
     features_shape = tf.shape(rpn_feature_map)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [1, 28, 28, 1088])
+    self.assertAllEqual(features_shape.numpy(), [1, 28, 28, 1088])

   def test_extract_proposal_features_half_size_input(self):
     feature_extractor = self._build_feature_extractor(

@@ -67,25 +61,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
     rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
         name='TestScope')(preprocessed_inputs)
     features_shape = tf.shape(rpn_feature_map)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [1, 7, 7, 1088])
-
-  def test_extract_proposal_features_dies_on_invalid_stride(self):
-    with self.assertRaises(ValueError):
-      self._build_feature_extractor(first_stage_features_stride=99)
-
-  def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
-    feature_extractor = self._build_feature_extractor(
-        first_stage_features_stride=16)
-    preprocessed_inputs = tf.random_uniform(
-        [224, 224, 3], maxval=255, dtype=tf.float32)
-    with self.assertRaises(ValueError):
-      feature_extractor.get_proposal_feature_extractor_model(
-          name='TestScope')(preprocessed_inputs)
+    self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1088])

   def test_extract_box_classifier_features_returns_expected_size(self):
     feature_extractor = self._build_feature_extractor(

@@ -97,12 +73,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
     proposal_classifier_features = (
         model(proposal_feature_maps))
     features_shape = tf.shape(proposal_classifier_features)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [2, 8, 8, 1536])
+    self.assertAllEqual(features_shape.numpy(), [2, 8, 8, 1536])


 if __name__ == '__main__':
research/object_detection/models/faster_rcnn_inception_v2_feature_extractor_test.py → research/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py

@@ -14,13 +14,15 @@
 # ==============================================================================
 """Tests for faster_rcnn_inception_v2_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_inception_v2_feature_extractor as faster_rcnn_inception_v2
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnInceptionV2FeatureExtractorTest(tf.test.TestCase):

   def _build_feature_extractor(self, first_stage_features_stride):
research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_test.py → research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py

@@ -14,13 +14,15 @@
 # ==============================================================================
 """Tests for faster_rcnn_mobilenet_v1_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_mobilenet_v1_feature_extractor as faster_rcnn_mobilenet_v1
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnMobilenetV1FeatureExtractorTest(tf.test.TestCase):

   def _build_feature_extractor(self, first_stage_features_stride):
research/object_detection/models/faster_rcnn_nas_feature_extractor.py

@@ -31,8 +31,14 @@ import tf_slim as slim
 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.utils import variables_helper
-from nets.nasnet import nasnet
-from nets.nasnet import nasnet_utils
+# pylint: disable=g-import-not-at-top
+try:
+  from nets.nasnet import nasnet
+  from nets.nasnet import nasnet_utils
+except:  # pylint: disable=bare-except
+  pass
+# pylint: enable=g-import-not-at-top

 arg_scope = slim.arg_scope
research/object_detection/models/faster_rcnn_nas_feature_extractor_test.py → research/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py

@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_nas_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnNASFeatureExtractorTest(tf.test.TestCase):

   def _build_feature_extractor(self, first_stage_features_stride):
research/object_detection/models/faster_rcnn_pnas_feature_extractor.py

@@ -30,7 +30,11 @@ import tf_slim as slim
 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.utils import variables_helper
 from nets.nasnet import nasnet_utils
-from nets.nasnet import pnasnet
+try:
+  from nets.nasnet import pnasnet  # pylint: disable=g-import-not-at-top
+except:  # pylint: disable=bare-except
+  pass

 arg_scope = slim.arg_scope
research/object_detection/models/faster_rcnn_pnas_feature_extractor_test.py → research/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py

@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_pnas_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase):

   def _build_feature_extractor(self, first_stage_features_stride):
research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py (new file, mode 100644)

# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet based Faster R-CNN implementation in Keras.

See Deep Residual Learning for Image Recognition by He et al.
https://arxiv.org/abs/1512.03385
"""

import tensorflow.compat.v1 as tf

from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import model_util


_RESNET_MODEL_CONV4_LAST_LAYERS = {
    'resnet_v1_50': 'conv4_block6_out',
    'resnet_v1_101': 'conv4_block23_out',
    'resnet_v1_152': 'conv4_block36_out',
}


class FasterRCNNResnetKerasFeatureExtractor(
    faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
  """Faster R-CNN with Resnet feature extractor implementation."""

  def __init__(self,
               is_training,
               resnet_v1_base_model,
               resnet_v1_base_model_name,
               first_stage_features_stride=16,
               batch_norm_trainable=False,
               weight_decay=0.0):
    """Constructor.

    Args:
      is_training: See base class.
      resnet_v1_base_model: base resnet v1 network to use. One of
        the resnet_v1.resnet_v1_{50,101,152} models.
      resnet_v1_base_model_name: model name under which to construct resnet v1.
      first_stage_features_stride: See base class.
      batch_norm_trainable: See base class.
      weight_decay: See base class.

    Raises:
      ValueError: If `first_stage_features_stride` is not 8 or 16.
    """
    if first_stage_features_stride != 16:
      raise ValueError('`first_stage_features_stride` must be 16.')
    super(FasterRCNNResnetKerasFeatureExtractor, self).__init__(
        is_training, first_stage_features_stride, batch_norm_trainable,
        weight_decay)
    self.classification_backbone = None
    self._variable_dict = {}
    self._resnet_v1_base_model = resnet_v1_base_model
    self._resnet_v1_base_model_name = resnet_v1_base_model_name

  def preprocess(self, resized_inputs):
    """Faster R-CNN Resnet V1 preprocessing.

    VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
    Note that if the number of channels is not equal to 3, the mean subtraction
    will be skipped and the original resized_inputs will be returned.

    Args:
      resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images with values between 0 and 255.0.

    Returns:
      preprocessed_inputs: A [batch, height_out, width_out, channels] float32
        tensor representing a batch of images.
    """
    if resized_inputs.shape.as_list()[3] == 3:
      channel_means = [123.68, 116.779, 103.939]
      return resized_inputs - [[channel_means]]
    else:
      return resized_inputs

  def get_proposal_feature_extractor_model(self, name=None):
    """Returns a model that extracts first stage RPN features.

    Extracts features using the first half of the Resnet v1 network.

    Args:
      name: A scope name to construct all variables within.

    Returns:
      A Keras model that takes preprocessed_inputs:
        A [batch, height, width, channels] float32 tensor
        representing a batch of images.

      And returns rpn_feature_map:
        A tensor with shape [batch, height, width, depth]
    """
    if not self.classification_backbone:
      self.classification_backbone = self._resnet_v1_base_model(
          batchnorm_training=self._train_batch_norm,
          conv_hyperparams=None,
          weight_decay=self._weight_decay,
          classes=None,
          weights=None,
          include_top=False)
    with tf.name_scope(name):
      with tf.name_scope('ResnetV1'):
        conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
            self._resnet_v1_base_model_name]
        proposal_features = self.classification_backbone.get_layer(
            name=conv4_last_layer).output
        keras_model = tf.keras.Model(
            inputs=self.classification_backbone.inputs,
            outputs=proposal_features)
        for variable in keras_model.variables:
          self._variable_dict[variable.name[:-2]] = variable
        return keras_model

  def get_box_classifier_feature_extractor_model(self, name=None):
    """Returns a model that extracts second stage box classifier features.

    This function reconstructs the "second half" of the ResNet v1
    network after the part defined in `get_proposal_feature_extractor_model`.

    Args:
      name: A scope name to construct all variables within.

    Returns:
      A Keras model that takes proposal_feature_maps:
        A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.

      And returns proposal_classifier_features:
        A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, height, width, depth]
        representing box classifier features for each proposal.
    """
    if not self.classification_backbone:
      self.classification_backbone = self._resnet_v1_base_model(
          batchnorm_training=self._train_batch_norm,
          conv_hyperparams=None,
          weight_decay=self._weight_decay,
          classes=None,
          weights=None,
          include_top=False)
    with tf.name_scope(name):
      with tf.name_scope('ResnetV1'):
        conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
            self._resnet_v1_base_model_name]
        proposal_feature_maps = self.classification_backbone.get_layer(
            name=conv4_last_layer).output
        proposal_classifier_features = self.classification_backbone.get_layer(
            name='conv5_block3_out').output

        keras_model = model_util.extract_submodel(
            model=self.classification_backbone,
            inputs=proposal_feature_maps,
            outputs=proposal_classifier_features)
        for variable in keras_model.variables:
          self._variable_dict[variable.name[:-2]] = variable
        return keras_model

  def restore_from_classification_checkpoint_fn(
      self,
      first_stage_feature_extractor_scope,
      second_stage_feature_extractor_scope):
    """Returns a map for restoring from an (object-based) checkpoint.

    Args:
      first_stage_feature_extractor_scope: A scope name for the first stage
        feature extractor (unused).
      second_stage_feature_extractor_scope: A scope name for the second stage
        feature extractor (unused).

    Returns:
      A dict mapping keys to Keras models
    """
    return {'feature_extractor': self.classification_backbone}


class FasterRCNNResnet50KerasFeatureExtractor(
    FasterRCNNResnetKerasFeatureExtractor):
  """Faster R-CNN with Resnet50 feature extractor implementation."""

  def __init__(self,
               is_training,
               first_stage_features_stride=16,
               batch_norm_trainable=False,
               weight_decay=0.0):
    """Constructor.

    Args:
      is_training: See base class.
      first_stage_features_stride: See base class.
      batch_norm_trainable: See base class.
      weight_decay: See base class.
    """
    super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
        is_training=is_training,
        resnet_v1_base_model=resnet_v1.resnet_v1_50,
        resnet_v1_base_model_name='resnet_v1_50',
        first_stage_features_stride=first_stage_features_stride,
        batch_norm_trainable=batch_norm_trainable,
        weight_decay=weight_decay)


class FasterRCNNResnet101KerasFeatureExtractor(
    FasterRCNNResnetKerasFeatureExtractor):
  """Faster R-CNN with Resnet101 feature extractor implementation."""

  def __init__(self,
               is_training,
               first_stage_features_stride=16,
               batch_norm_trainable=False,
               weight_decay=0.0):
    """Constructor.

    Args:
      is_training: See base class.
      first_stage_features_stride: See base class.
      batch_norm_trainable: See base class.
      weight_decay: See base class.
    """
    super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__(
        is_training=is_training,
        resnet_v1_base_model=resnet_v1.resnet_v1_101,
        resnet_v1_base_model_name='resnet_v1_101',
        first_stage_features_stride=first_stage_features_stride,
        batch_norm_trainable=batch_norm_trainable,
        weight_decay=weight_decay)


class FasterRCNNResnet152KerasFeatureExtractor(
    FasterRCNNResnetKerasFeatureExtractor):
  """Faster R-CNN with Resnet152 feature extractor implementation."""

  def __init__(self,
               is_training,
               first_stage_features_stride=16,
               batch_norm_trainable=False,
               weight_decay=0.0):
    """Constructor.

    Args:
      is_training: See base class.
      first_stage_features_stride: See base class.
      batch_norm_trainable: See base class.
      weight_decay: See base class.
    """
    super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__(
        is_training=is_training,
        resnet_v1_base_model=resnet_v1.resnet_v1_152,
        resnet_v1_base_model_name='resnet_v1_152',
        first_stage_features_stride=first_stage_features_stride,
        batch_norm_trainable=batch_norm_trainable,
        weight_decay=weight_decay)
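A minimal usage sketch for the classes defined above, mirroring the TF2 test that follows. The batch size, 224x224 input, and the 'FirstStage' scope name are arbitrary illustrative choices; running eagerly assumes tf.enable_v2_behavior(), as in the test's __main__ block.

import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res

tf.enable_v2_behavior()
extractor = frcnn_res.FasterRCNNResnet50KerasFeatureExtractor(
    is_training=False, batch_norm_trainable=False, weight_decay=0.0)
images = tf.random_uniform([1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_features = extractor.get_proposal_feature_extractor_model(
    name='FirstStage')(extractor.preprocess(images))
print(rpn_features.shape)  # Expected (1, 14, 14, 1024) per the test below.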
research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py (new file, mode 100644)

# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_resnet_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf

from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res
from object_detection.utils import tf_version


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRcnnResnetKerasFeatureExtractorTest(tf.test.TestCase):

  def _build_feature_extractor(self, architecture='resnet_v1_50'):
    return frcnn_res.FasterRCNNResnet50KerasFeatureExtractor(
        is_training=False,
        first_stage_features_stride=16,
        batch_norm_trainable=False,
        weight_decay=0.0)

  def test_extract_proposal_features_returns_expected_size(self):
    feature_extractor = self._build_feature_extractor()
    preprocessed_inputs = tf.random_uniform(
        [1, 224, 224, 3], maxval=255, dtype=tf.float32)
    rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
        name='TestScope')(preprocessed_inputs)
    features_shape = tf.shape(rpn_feature_map)
    self.assertAllEqual(features_shape.numpy(), [1, 14, 14, 1024])

  def test_extract_proposal_features_half_size_input(self):
    feature_extractor = self._build_feature_extractor()
    preprocessed_inputs = tf.random_uniform(
        [1, 112, 112, 3], maxval=255, dtype=tf.float32)
    rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
        name='TestScope')(preprocessed_inputs)
    features_shape = tf.shape(rpn_feature_map)
    self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1024])

  def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
    feature_extractor = self._build_feature_extractor()
    preprocessed_inputs = tf.random_uniform(
        [224, 224, 3], maxval=255, dtype=tf.float32)
    with self.assertRaises(tf.errors.InvalidArgumentError):
      feature_extractor.get_proposal_feature_extractor_model(
          name='TestScope')(preprocessed_inputs)

  def test_extract_box_classifier_features_returns_expected_size(self):
    feature_extractor = self._build_feature_extractor()
    proposal_feature_maps = tf.random_uniform(
        [3, 7, 7, 1024], maxval=255, dtype=tf.float32)
    model = feature_extractor.get_box_classifier_feature_extractor_model(
        name='TestScope')
    proposal_classifier_features = (
        model(proposal_feature_maps))
    features_shape = tf.shape(proposal_classifier_features)
    # Note: due to a slight mismatch in slim and keras resnet definitions
    # the output shape of the box classifier is slightly different compared to
    # that of the slim implementation. The keras version is more `canonical`
    # in that it more accurately reflects the original authors' implementation.
    # TODO(jonathanhuang): make the output shape match that of the slim
    # implementation by using atrous convolutions.
    self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048])


if __name__ == '__main__':
  tf.enable_v2_behavior()
  tf.test.main()
research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_test.py → research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py

@@ -14,13 +14,15 @@
 # ==============================================================================
 """Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1
+from object_detection.utils import tf_version


+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase):

   def _build_feature_extractor(self,
research/object_detection/models/feature_map_generators_test.py

@@ -14,7 +14,7 @@
 # ==============================================================================
 """Tests for feature map generators."""

+import unittest
 from absl.testing import parameterized

 import numpy as np
@@ -25,6 +25,9 @@ from google.protobuf import text_format
 from object_detection.builders import hyperparams_builder
 from object_detection.models import feature_map_generators
 from object_detection.protos import hyperparams_pb2
+from object_detection.utils import test_case
+from object_detection.utils import test_utils
+from object_detection.utils import tf_version

 INCEPTION_V2_LAYOUT = {
     'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
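Every hunk below replaces explicit Session plumbing with the test_utils.GraphContextOrNone() / self.execute(graph_fn, [], g) pair introduced by these imports. Neither helper's implementation appears in this diff, so the following is only a sketch of the contract the call sites seem to assume.

import contextlib
import tensorflow.compat.v1 as tf

@contextlib.contextmanager
def graph_context_or_none():
  # Assumed behaviour of test_utils.GraphContextOrNone: hand the test a fresh
  # tf.Graph to build into under TF1, and None under eager TF2.
  if tf.executing_eagerly():
    yield None
  else:
    graph = tf.Graph()
    with graph.as_default():
      yield graph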
@@ -52,11 +55,7 @@ SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = {
 }


-@parameterized.parameters(
-    {'use_keras': False},
-    {'use_keras': True},
-)
-class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
+class MultiResolutionFeatureMapGeneratorTest(test_case.TestCase):

   def _build_conv_hyperparams(self):
     conv_hyperparams = hyperparams_pb2.Hyperparams()
@@ -73,9 +72,9 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
     text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
     return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

-  def _build_feature_map_generator(self, feature_map_layout, use_keras,
+  def _build_feature_map_generator(self, feature_map_layout,
                                    pool_residual=False):
-    if use_keras:
+    if tf_version.is_tf2():
       return feature_map_generators.KerasMultiResolutionFeatureMaps(
           feature_map_layout=feature_map_layout,
           depth_multiplier=1,
@@ -97,17 +96,18 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         pool_residual=pool_residual)
     return feature_map_generator

-  def test_get_expected_feature_map_shapes_with_inception_v2(self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=INCEPTION_V2_LAYOUT,
-        use_keras=use_keras)
-    feature_maps = feature_map_generator(image_features)
+  def test_get_expected_feature_map_shapes_with_inception_v2(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=INCEPTION_V2_LAYOUT)
+    def graph_fn():
+      feature_maps = feature_map_generator(image_features)
+      return feature_maps

     expected_feature_map_shapes = {
         'Mixed_3c': (4, 28, 28, 256),
@@ -116,29 +116,25 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise(
-      self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    layout_copy = INCEPTION_V2_LAYOUT.copy()
-    layout_copy['use_depthwise'] = True
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=layout_copy,
-        use_keras=use_keras)
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      layout_copy = INCEPTION_V2_LAYOUT.copy()
+      layout_copy['use_depthwise'] = True
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=layout_copy)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'Mixed_3c': (4, 28, 28, 256),
@@ -147,29 +143,25 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
-
-  def test_get_expected_feature_map_shapes_use_explicit_padding(
-      self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    layout_copy = INCEPTION_V2_LAYOUT.copy()
-    layout_copy['use_explicit_padding'] = True
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=layout_copy,
-        use_keras=use_keras)
-    feature_maps = feature_map_generator(image_features)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+
+  def test_get_expected_feature_map_shapes_use_explicit_padding(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      layout_copy = INCEPTION_V2_LAYOUT.copy()
+      layout_copy['use_explicit_padding'] = True
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=layout_copy,)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'Mixed_3c': (4, 28, 28, 256),
@@ -178,27 +170,24 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
-
-  def test_get_expected_feature_map_shapes_with_inception_v3(self, use_keras):
-    image_features = {
-        'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
-        'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
-        'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=INCEPTION_V3_LAYOUT,
-        use_keras=use_keras)
-    feature_maps = feature_map_generator(image_features)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+
+  def test_get_expected_feature_map_shapes_with_inception_v3(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
+          'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
+          'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=INCEPTION_V3_LAYOUT,)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'Mixed_5d': (4, 35, 35, 256),
@@ -207,29 +196,26 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
-      self, use_keras):
-    image_features = {
-        'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
-                                                 dtype=tf.float32),
-        'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
-                                                 dtype=tf.float32),
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
-        use_keras=use_keras)
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
+                                                   dtype=tf.float32),
+          'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
+                                                   dtype=tf.float32),
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'Conv2d_11_pointwise': (4, 16, 16, 512),
@@ -237,55 +223,50 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
         'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
         'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1(
-      self, use_keras):
-    image_features = {
-        'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
-                                                 dtype=tf.float32),
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
-        use_keras=use_keras,
-        pool_residual=True)
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
+                                                   dtype=tf.float32),
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
+          pool_residual=True)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'Conv2d_13_pointwise': (4, 8, 8, 1024),
         'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256),
         'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256),
         'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

-  def test_get_expected_variable_names_with_inception_v2(self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=INCEPTION_V2_LAYOUT,
-        use_keras=use_keras)
-    feature_maps = feature_map_generator(image_features)
+  def test_get_expected_variable_names_with_inception_v2(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=INCEPTION_V2_LAYOUT,)
+    def graph_fn():
+      return feature_map_generator(image_features)
+    self.execute(graph_fn, [], g)

     expected_slim_variables = set([
         'Mixed_5c_1_Conv2d_3_1x1_256/weights',
         'Mixed_5c_1_Conv2d_3_1x1_256/biases',
@@ -316,32 +297,32 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
     ])
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      sess.run(feature_maps)
-      actual_variable_set = set(
-          [var.op.name for var in tf.trainable_variables()])
-      if use_keras:
-        self.assertSetEqual(expected_keras_variables, actual_variable_set)
-      else:
-        self.assertSetEqual(expected_slim_variables, actual_variable_set)
+    if tf_version.is_tf2():
+      actual_variable_set = set(
+          [var.name.split(':')[0] for var in feature_map_generator.variables])
+      self.assertSetEqual(expected_keras_variables, actual_variable_set)
+    else:
+      with g.as_default():
+        actual_variable_set = set(
+            [var.op.name for var in tf.trainable_variables()])
+      self.assertSetEqual(expected_slim_variables, actual_variable_set)

   def test_get_expected_variable_names_with_inception_v2_use_depthwise(
-      self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    layout_copy = INCEPTION_V2_LAYOUT.copy()
-    layout_copy['use_depthwise'] = True
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=layout_copy,
-        use_keras=use_keras)
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      layout_copy = INCEPTION_V2_LAYOUT.copy()
+      layout_copy['use_depthwise'] = True
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=layout_copy,)
+    def graph_fn():
+      return feature_map_generator(image_features)
+    self.execute(graph_fn, [], g)

     expected_slim_variables = set([
         'Mixed_5c_1_Conv2d_3_1x1_256/weights',
@@ -391,23 +372,20 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
     ])
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      sess.run(feature_maps)
-      actual_variable_set = set(
-          [var.op.name for var in tf.trainable_variables()])
-      if use_keras:
-        self.assertSetEqual(expected_keras_variables, actual_variable_set)
-      else:
-        self.assertSetEqual(expected_slim_variables, actual_variable_set)
+    if tf_version.is_tf2():
+      actual_variable_set = set(
+          [var.name.split(':')[0] for var in feature_map_generator.variables])
+      self.assertSetEqual(expected_keras_variables, actual_variable_set)
+    else:
+      with g.as_default():
+        actual_variable_set = set(
+            [var.op.name for var in tf.trainable_variables()])
+      self.assertSetEqual(expected_slim_variables, actual_variable_set)


-@parameterized.parameters({'use_native_resize_op': True, 'use_keras': False},
-                          {'use_native_resize_op': False, 'use_keras': False},
-                          {'use_native_resize_op': True, 'use_keras': True},
-                          {'use_native_resize_op': False, 'use_keras': True})
-class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
+@parameterized.parameters({'use_native_resize_op': True},
+                          {'use_native_resize_op': False})
+class FPNFeatureMapGeneratorTest(test_case.TestCase, parameterized.TestCase):

   def _build_conv_hyperparams(self):
     conv_hyperparams = hyperparams_pb2.Hyperparams()
@@ -425,10 +403,10 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
     return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

   def _build_feature_map_generator(
-      self, image_features, depth, use_keras, use_bounded_activations=False,
+      self, image_features, depth, use_bounded_activations=False,
       use_native_resize_op=False, use_explicit_padding=False,
       use_depthwise=False):
-    if use_keras:
+    if tf_version.is_tf2():
       return feature_map_generators.KerasFpnTopDownFeatureMaps(
           num_levels=len(image_features),
           depth=depth,
@@ -454,19 +432,20 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
     return feature_map_generator

   def test_get_expected_feature_map_shapes(
-      self, use_native_resize_op, use_keras):
-    image_features = [
-        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
-        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
-        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
-        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
-    ]
-    feature_map_generator = self._build_feature_map_generator(
-        image_features=image_features,
-        depth=128,
-        use_keras=use_keras,
-        use_native_resize_op=use_native_resize_op)
-    feature_maps = feature_map_generator(image_features)
+      self, use_native_resize_op):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = [
+          ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
+          ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
+          ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
+          ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
+      ]
+      feature_map_generator = self._build_feature_map_generator(
+          image_features=image_features,
+          depth=128,
+          use_native_resize_op=use_native_resize_op)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'top_down_block2': (4, 8, 8, 128),
@@ -474,30 +453,27 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
         'top_down_block4': (4, 2, 2, 128),
         'top_down_block5': (4, 1, 1, 128)
     }
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = {key: value.shape
-                                for key, value in out_feature_maps.items()}
-      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_get_expected_feature_map_shapes_with_explicit_padding(
-      self, use_native_resize_op, use_keras):
-    image_features = [
-        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
-        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
-        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
-        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
-    ]
-    feature_map_generator = self._build_feature_map_generator(
-        image_features=image_features,
-        depth=128,
-        use_keras=use_keras,
-        use_explicit_padding=True,
-        use_native_resize_op=use_native_resize_op)
-    feature_maps = feature_map_generator(image_features)
+      self, use_native_resize_op):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = [
+          ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
+          ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
+          ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
+          ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
+      ]
+      feature_map_generator = self._build_feature_map_generator(
+          image_features=image_features,
+          depth=128,
+          use_explicit_padding=True,
+          use_native_resize_op=use_native_resize_op)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'top_down_block2': (4, 8, 8, 128),
@@ -505,19 +481,15 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
         'top_down_block4': (4, 2, 2, 128),
         'top_down_block5': (4, 1, 1, 128)
     }
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = {key: value.shape
-                                for key, value in out_feature_maps.items()}
-      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

+  @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
   def test_use_bounded_activations_add_operations(
-      self, use_native_resize_op, use_keras):
-    tf_graph = tf.Graph()
-    with tf_graph.as_default():
+      self, use_native_resize_op):
+    with test_utils.GraphContextOrNone() as g:
       image_features = [('block2',
                          tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
                         ('block3',
@@ -529,34 +501,23 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
       feature_map_generator = self._build_feature_map_generator(
           image_features=image_features,
           depth=128,
-          use_keras=use_keras,
           use_bounded_activations=True,
           use_native_resize_op=use_native_resize_op)
-      feature_map_generator(image_features)
-      if use_keras:
-        expected_added_operations = dict.fromkeys([
-            'FeatureMaps/top_down/clip_by_value/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_1/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_2/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_3/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_4/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_5/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_6/clip_by_value',
-        ])
-      else:
-        expected_added_operations = dict.fromkeys([
-            'top_down/clip_by_value', 'top_down/clip_by_value_1',
-            'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
-            'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
-            'top_down/clip_by_value_6'
-        ])
-      op_names = {op.name: None for op in tf_graph.get_operations()}
-      self.assertDictContainsSubset(expected_added_operations, op_names)
+    def graph_fn():
+      return feature_map_generator(image_features)
+    self.execute(graph_fn, [], g)
+    expected_added_operations = dict.fromkeys([
+        'top_down/clip_by_value', 'top_down/clip_by_value_1',
+        'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
+        'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
+        'top_down/clip_by_value_6'
+    ])
+    op_names = {op.name: None for op in g.get_operations()}
+    self.assertDictContainsSubset(expected_added_operations, op_names)

+  @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
   def test_use_bounded_activations_clip_value(
-      self, use_native_resize_op, use_keras):
+      self, use_native_resize_op):
     tf_graph = tf.Graph()
     with tf_graph.as_default():
       image_features = [
@@ -568,28 +529,16 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
       feature_map_generator = self._build_feature_map_generator(
          image_features=image_features,
          depth=128,
-         use_keras=use_keras,
          use_bounded_activations=True,
          use_native_resize_op=use_native_resize_op)
       feature_map_generator(image_features)
-      if use_keras:
-        expected_clip_by_value_ops = dict.fromkeys([
-            'FeatureMaps/top_down/clip_by_value/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_1/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_2/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_3/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_4/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_5/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_6/clip_by_value',
-        ])
-      else:
-        expected_clip_by_value_ops = [
-            'top_down/clip_by_value', 'top_down/clip_by_value_1',
-            'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
-            'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
-            'top_down/clip_by_value_6'
-        ]
+      expected_clip_by_value_ops = [
+          'top_down/clip_by_value', 'top_down/clip_by_value_1',
+          'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
+          'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
+          'top_down/clip_by_value_6'
+      ]

       # Gathers activation tensors before and after clip_by_value operations.
       activations = {}
@@ -631,20 +580,21 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
         self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound)

   def test_get_expected_feature_map_shapes_with_depthwise(
-      self, use_native_resize_op, use_keras):
-    image_features = [
-        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
-        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
-        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
-        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
-    ]
-    feature_map_generator = self._build_feature_map_generator(
-        image_features=image_features,
-        depth=128,
-        use_keras=use_keras,
-        use_depthwise=True,
-        use_native_resize_op=use_native_resize_op)
-    feature_maps = feature_map_generator(image_features)
+      self, use_native_resize_op):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = [
+          ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
+          ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
+          ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
+          ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
+      ]
+      feature_map_generator = self._build_feature_map_generator(
+          image_features=image_features,
+          depth=128,
+          use_depthwise=True,
+          use_native_resize_op=use_native_resize_op)
+    def graph_fn():
+      return feature_map_generator(image_features)

     expected_feature_map_shapes = {
         'top_down_block2': (4, 8, 8, 128),
@@ -652,30 +602,27 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
        'top_down_block4': (4, 2, 2, 128),
        'top_down_block5': (4, 1, 1, 128)
    }
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
    out_feature_maps = self.execute(graph_fn, [], g)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
  def test_get_expected_variable_names(
      self, use_native_resize_op, use_keras):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_map_generator = self._build_feature_map_generator(
        image_features=image_features,
        depth=128,
        use_keras=use_keras,
        use_native_resize_op=use_native_resize_op)
    feature_maps = feature_map_generator(image_features)

      self, use_native_resize_op):
    with test_utils.GraphContextOrNone() as g:
      image_features = [
          ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
          ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
          ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
          ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
      ]
      feature_map_generator = self._build_feature_map_generator(
          image_features=image_features,
          depth=128,
          use_native_resize_op=use_native_resize_op)
    def graph_fn():
      return feature_map_generator(image_features)
    self.execute(graph_fn, [], g)
    expected_slim_variables = set([
        'projection_1/weights',
        'projection_1/biases',
...
@@ -709,33 +656,34 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
        'FeatureMaps/top_down/smoothing_3_conv/kernel',
        'FeatureMaps/top_down/smoothing_3_conv/bias'])

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(feature_maps)
    if tf_version.is_tf2():
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      if use_keras:
        self.assertSetEqual(expected_keras_variables, actual_variable_set)
      else:
        self.assertSetEqual(expected_slim_variables, actual_variable_set)
          [var.name.split(':')[0] for var in feature_map_generator.variables])
      self.assertSetEqual(expected_keras_variables, actual_variable_set)
    else:
      with g.as_default():
        actual_variable_set = set(
            [var.op.name for var in tf.trainable_variables()])
      self.assertSetEqual(expected_slim_variables, actual_variable_set)
  def test_get_expected_variable_names_with_depthwise(
      self, use_native_resize_op, use_keras):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_map_generator = self._build_feature_map_generator(
        image_features=image_features,
        depth=128,
        use_keras=use_keras,
        use_depthwise=True,
        use_native_resize_op=use_native_resize_op)
    feature_maps = feature_map_generator(image_features)

      self, use_native_resize_op):
    with test_utils.GraphContextOrNone() as g:
      image_features = [
          ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
          ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
          ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
          ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
      ]
      feature_map_generator = self._build_feature_map_generator(
          image_features=image_features,
          depth=128,
          use_depthwise=True,
          use_native_resize_op=use_native_resize_op)
    def graph_fn():
      return feature_map_generator(image_features)
    self.execute(graph_fn, [], g)
    expected_slim_variables = set([
        'projection_1/weights',
        'projection_1/biases',
...
@@ -775,16 +723,16 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
        'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel',
        'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias'])

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(feature_maps)
    if tf_version.is_tf2():
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      if use_keras:
        self.assertSetEqual(expected_keras_variables, actual_variable_set)
      else:
        self.assertSetEqual(expected_slim_variables, actual_variable_set)
          [var.name.split(':')[0] for var in feature_map_generator.variables])
      self.assertSetEqual(expected_keras_variables, actual_variable_set)
    else:
      with g.as_default():
        actual_variable_set = set(
            [var.op.name for var in tf.trainable_variables()])
      self.assertSetEqual(expected_slim_variables, actual_variable_set)
class GetDepthFunctionTest(tf.test.TestCase):
...
@@ -804,6 +752,7 @@ class GetDepthFunctionTest(tf.test.TestCase):
    {'replace_pool_with_conv': False},
    {'replace_pool_with_conv': True},
)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase):

  def test_get_expected_feature_map_shapes(self, replace_pool_with_conv):
...
research/object_detection/models/keras_models/convert_keras_models.py
0 → 100644
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Write keras weights into a tensorflow checkpoint.

The imagenet weights in `keras.applications` are downloaded from github.
This script converts them into the tensorflow checkpoint format and stores them
on disk where they can be easily accessible during training.
"""

from __future__ import print_function

import os

from absl import app
import numpy as np
import tensorflow.compat.v1 as tf

FLAGS = tf.flags.FLAGS

tf.flags.DEFINE_string('model', 'resnet_v2_101',
                       'The model to load. The following are supported: '
                       '"resnet_v1_50", "resnet_v1_101", "resnet_v2_50", '
                       '"resnet_v2_101"')
tf.flags.DEFINE_string('output_path', None,
                       'The directory to output weights in.')
tf.flags.DEFINE_boolean('verify_weights', True,
                        ('Verify the weights are loaded correctly by making '
                         'sure the predictions are the same before and after '
                         'saving.'))


def init_model(name):
  """Creates a Keras Model with the specific ResNet version."""
  if name == 'resnet_v1_50':
    model = tf.keras.applications.ResNet50(weights='imagenet')
  elif name == 'resnet_v1_101':
    model = tf.keras.applications.ResNet101(weights='imagenet')
  elif name == 'resnet_v2_50':
    model = tf.keras.applications.ResNet50V2(weights='imagenet')
  elif name == 'resnet_v2_101':
    model = tf.keras.applications.ResNet101V2(weights='imagenet')
  else:
    raise ValueError('Model {} not supported'.format(FLAGS.model))

  return model


def main(_):
  model = init_model(FLAGS.model)

  path = os.path.join(FLAGS.output_path, FLAGS.model)
  tf.gfile.MakeDirs(path)
  weights_path = os.path.join(path, 'weights')
  ckpt = tf.train.Checkpoint(feature_extractor=model)
  saved_path = ckpt.save(weights_path)

  if FLAGS.verify_weights:
    imgs = np.random.randn(1, 224, 224, 3).astype(np.float32)
    keras_preds = model(imgs)

    model = init_model(FLAGS.model)
    ckpt.restore(saved_path)
    loaded_weights_pred = model(imgs).numpy()

    if not np.all(np.isclose(keras_preds, loaded_weights_pred)):
      raise RuntimeError('The model was not saved correctly.')


if __name__ == '__main__':
  tf.enable_v2_behavior()
  app.run(main)
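A minimal usage sketch for the new conversion script above (the output directory is a hypothetical path; the flag names come straight from the DEFINE_string/DEFINE_boolean calls in the file):

  # Convert the Keras ResNet50 V1 imagenet weights into a TF checkpoint.
  python research/object_detection/models/keras_models/convert_keras_models.py \
      --model resnet_v1_50 \
      --output_path /tmp/keras_checkpoints

With --verify_weights left at its default of True, the script rebuilds the model, restores the checkpoint it just wrote, and raises RuntimeError if the restored predictions differ from the originals.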
research/object_detection/models/keras_models/hourglass_network_test.py → research/object_detection/models/keras_models/hourglass_network_tf2_test.py
...
@@ -13,14 +13,16 @@
# limitations under the License.
# ==============================================================================
"""Testing the Hourglass network."""

import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import hourglass_network as hourglass
from object_detection.utils import tf_version


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase):

  def test_identity_layer(self):
...
@@ -95,5 +97,4 @@ class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase):

if __name__ == '__main__':
  tf.enable_v2_behavior()
  tf.test.main()
research/object_detection/models/keras_models/inception_resnet_v2_test.py → research/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py
...
@@ -30,13 +30,14 @@ consistent.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import inception_resnet_v2
from object_detection.utils import test_case
from object_detection.utils import tf_version

_KERAS_TO_SLIM_ENDPOINT_NAMES = {
    'activation': 'Conv2d_1a_3x3',
...
@@ -100,6 +101,7 @@ _NUM_CHANNELS = 3
_BATCH_SIZE = 2


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class InceptionResnetV2Test(test_case.TestCase):

  def _create_application_with_layer_outputs(
...
@@ -166,8 +168,7 @@ class InceptionResnetV2Test(test_case.TestCase):
    model = self._create_application_with_layer_outputs(
        layer_names=layer_names, batchnorm_training=False)
    preprocessed_inputs = tf.placeholder(
        tf.float32, (4, None, None, _NUM_CHANNELS))
    preprocessed_inputs = tf.random.uniform([4, 40, 40, _NUM_CHANNELS])
    model(preprocessed_inputs)
    return model.variables
...
research/object_detection/models/keras_models/mobilenet_v1_test.py → research/object_detection/models/keras_models/mobilenet_v1_tf2_test.py
...
@@ -29,7 +29,7 @@ consistent.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
...
@@ -42,6 +42,7 @@ from object_detection.models.keras_models import model_utils
from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version

_KERAS_LAYERS_TO_CHECK = [
    'conv1_relu',
...
@@ -64,6 +65,7 @@ _NUM_CHANNELS = 3
_BATCH_SIZE = 2


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MobilenetV1Test(test_case.TestCase):

  def _build_conv_hyperparams(self):
...
@@ -118,19 +120,17 @@ class MobilenetV1Test(test_case.TestCase):
      self, image_height, image_width, depth_multiplier,
      expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
      layer_names=None, conv_defs=None):
    def graph_fn(image_tensor):
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=use_explicit_padding,
          min_depth=min_depth,
          alpha=depth_multiplier,
          conv_defs=conv_defs)
      return model(image_tensor)

    model = self._create_application_with_layer_outputs(
        layer_names=layer_names,
        batchnorm_training=False,
        use_explicit_padding=use_explicit_padding,
        min_depth=min_depth,
        alpha=depth_multiplier,
        conv_defs=conv_defs)
    image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width,
                                  _NUM_CHANNELS).astype(np.float32)
    feature_maps = self.execute(graph_fn, [image_tensor])
    feature_maps = model(image_tensor)
    for feature_map, expected_shape in zip(
        feature_maps, expected_feature_map_shape):
...
@@ -140,36 +140,29 @@ class MobilenetV1Test(test_case.TestCase):
      self, image_height, image_width, depth_multiplier,
      expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
      layer_names=None):
    def graph_fn(image_height, image_width):
      image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width,
                                        _NUM_CHANNELS], dtype=tf.float32)
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=use_explicit_padding,
          alpha=depth_multiplier)
      return model(image_tensor)

    image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width,
                                      _NUM_CHANNELS], dtype=tf.float32)
    model = self._create_application_with_layer_outputs(
        layer_names=layer_names,
        batchnorm_training=False,
        use_explicit_padding=use_explicit_padding,
        alpha=depth_multiplier)
    feature_maps = self.execute_cpu(graph_fn, [
        np.array(image_height, dtype=np.int32),
        np.array(image_width, dtype=np.int32)
    ])
    feature_maps = model(image_tensor)
    for feature_map, expected_shape in zip(
        feature_maps, expected_feature_map_shape):
      self.assertAllEqual(feature_map.shape, expected_shape)

  def _get_variables(self, depth_multiplier, layer_names=None):
    g = tf.Graph()
    with g.as_default():
      preprocessed_inputs = tf.placeholder(
          tf.float32, (4, None, None, _NUM_CHANNELS))
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=False,
          alpha=depth_multiplier)
      model(preprocessed_inputs)
      return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

    tf.keras.backend.clear_session()
    model = self._create_application_with_layer_outputs(
        layer_names=layer_names,
        batchnorm_training=False,
        use_explicit_padding=False,
        alpha=depth_multiplier)
    preprocessed_inputs = tf.random.uniform([2, 40, 40, 3])
    model(preprocessed_inputs)
    return model.variables

  def test_returns_correct_shapes_128(self):
    image_height = 128
...
research/object_detection/models/keras_models/mobilenet_v2_test.py → research/object_detection/models/keras_models/mobilenet_v2_tf2_test.py
...
@@ -18,7 +18,7 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
...
@@ -31,6 +31,7 @@ from object_detection.models.keras_models import model_utils
from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version

_layers_to_check = [
    'Conv1_relu',
...
@@ -53,6 +54,7 @@ _layers_to_check = [
    'out_relu']


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MobilenetV2Test(test_case.TestCase):

  def _build_conv_hyperparams(self):
...
@@ -86,6 +88,8 @@ class MobilenetV2Test(test_case.TestCase):
      min_depth=None,
      conv_defs=None):
    """Constructs Keras mobilenetv2 that extracts intermediate layer outputs."""
    # Have to clear the Keras backend to ensure isolation in layer naming
    tf.keras.backend.clear_session()
    if not layer_names:
      layer_names = _layers_to_check
    full_model = mobilenet_v2.mobilenet_v2(
...
@@ -107,19 +111,17 @@ class MobilenetV2Test(test_case.TestCase):
      self, batch_size, image_height, image_width, depth_multiplier,
      expected_feature_map_shapes, use_explicit_padding=False, min_depth=None,
      layer_names=None, conv_defs=None):
    def graph_fn(image_tensor):
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=use_explicit_padding,
          min_depth=min_depth,
          alpha=depth_multiplier,
          conv_defs=conv_defs)
      return model(image_tensor)

    model = self._create_application_with_layer_outputs(
        layer_names=layer_names,
        batchnorm_training=False,
        use_explicit_padding=use_explicit_padding,
        min_depth=min_depth,
        alpha=depth_multiplier,
        conv_defs=conv_defs)
    image_tensor = np.random.rand(batch_size, image_height, image_width,
                                  3).astype(np.float32)
    feature_maps = self.execute(graph_fn, [image_tensor])
    feature_maps = model([image_tensor])
    for feature_map, expected_shape in zip(
        feature_maps, expected_feature_map_shapes):
...
@@ -129,34 +131,30 @@ class MobilenetV2Test(test_case.TestCase):
      self, batch_size, image_height, image_width, depth_multiplier,
      expected_feature_map_shapes, use_explicit_padding=False,
      layer_names=None):
    def graph_fn(image_height, image_width):
      image_tensor = tf.random_uniform([batch_size, image_height, image_width,
                                        3], dtype=tf.float32)
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=use_explicit_padding,
          alpha=depth_multiplier)
      return model(image_tensor)
    feature_maps = self.execute_cpu(graph_fn, [
        np.array(image_height, dtype=np.int32),
        np.array(image_width, dtype=np.int32)
    ])

    height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
                               dtype=tf.int32)
    width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
                              dtype=tf.int32)
    image_tensor = tf.random.uniform([batch_size, height, width, 3],
                                     dtype=tf.float32)
    model = self._create_application_with_layer_outputs(
        layer_names=layer_names,
        batchnorm_training=False,
        use_explicit_padding=use_explicit_padding,
        alpha=depth_multiplier)
    feature_maps = model(image_tensor)
    for feature_map, expected_shape in zip(
        feature_maps, expected_feature_map_shapes):
      self.assertAllEqual(feature_map.shape, expected_shape)

  def _get_variables(self, depth_multiplier, layer_names=None):
    g = tf.Graph()
    with g.as_default():
      preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=False,
          alpha=depth_multiplier)
      model(preprocessed_inputs)
      return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

    tf.keras.backend.clear_session()
    model = self._create_application_with_layer_outputs(
        layer_names=layer_names,
        batchnorm_training=False,
        use_explicit_padding=False,
        alpha=depth_multiplier)
    preprocessed_inputs = tf.random.uniform([2, 40, 40, 3])
    model(preprocessed_inputs)
    return model.variables

  def test_returns_correct_shapes_128(self):
    image_height = 128
...
research/object_detection/models/keras_models/resnet_v1_test.py → research/object_detection/models/keras_models/resnet_v1_tf2_test.py
...
@@ -19,7 +19,7 @@ object detection. To verify the consistency of the two models, we compare:
1. Output shape of each layer given different inputs.
2. Number of global variables.
"""

import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
...
@@ -30,6 +30,7 @@ from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import resnet_v1
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version

_EXPECTED_SHAPES_224_RESNET50 = {
    'conv2_block3_out': (4, 56, 56, 256),
...
@@ -65,6 +66,7 @@ _NUM_CHANNELS = 3
_BATCH_SIZE = 4


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ResnetV1Test(test_case.TestCase):

  def _build_conv_hyperparams(self):
...
@@ -146,8 +148,7 @@ class ResnetV1Test(test_case.TestCase):
    tf.keras.backend.clear_session()
    model = self._create_application_with_layer_outputs(
        model_index, batchnorm_training=False)
    preprocessed_inputs = tf.placeholder(
        tf.float32, (4, None, None, _NUM_CHANNELS))
    preprocessed_inputs = tf.random.uniform([2, 40, 40, _NUM_CHANNELS])
    model(preprocessed_inputs)
    return model.variables
...
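The resnet_v1 test above verifies the Keras backbone by comparing per-layer output shapes against a table such as _EXPECTED_SHAPES_224_RESNET50. A minimal sketch of that kind of shape check using only stock tf.keras APIs (the endpoint name and expected shape mirror the table entry shown in the diff; everything else is an illustrative assumption, not the test's actual helper code):

  import tensorflow as tf

  # Build a stock ResNet50 backbone and expose one intermediate endpoint by name.
  backbone = tf.keras.applications.ResNet50(
      weights=None, include_top=False, input_shape=(224, 224, 3))
  endpoint = backbone.get_layer('conv2_block3_out').output
  extractor = tf.keras.Model(inputs=backbone.inputs, outputs=endpoint)

  # A batch of 4 random 224x224 images should yield the (4, 56, 56, 256) shape
  # listed for 'conv2_block3_out' in the expected-shapes table.
  images = tf.random.uniform([4, 224, 224, 3])
  assert extractor(images).shape == (4, 56, 56, 256)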