ModelZoo / ResNet50_tensorflow / Commits

Commit abd50423 (unverified)
Authored Apr 03, 2018 by pkulzc; committed Apr 03, 2018 by GitHub

Merge pull request #3846 from pkulzc/master

Internal changes for object detection

Parents: c3b26603, 143464d2
This commit changes 40 files in total; this page shows 20 changed files with 380 additions and 62 deletions (+380, −62).
research/object_detection/models/faster_rcnn_pnas_feature_extractor_test.py  +122  −0
research/object_detection/models/ssd_inception_v2_feature_extractor.py  +9  −3
research/object_detection/models/ssd_inception_v3_feature_extractor.py  +9  −3
research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py  +9  −3
research/object_detection/models/ssd_mobilenet_v2_feature_extractor.py  +9  −3
research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py  +36  −9
research/object_detection/protos/eval.proto  +4  −0
research/object_detection/protos/faster_rcnn.proto  +6  −0
research/object_detection/protos/input_reader.proto  +3  −0
research/object_detection/protos/ssd.proto  +6  −0
research/object_detection/protos/train.proto  +5  −0
research/object_detection/trainer.py  +23  −23
research/object_detection/utils/config_util.py  +21  −0
research/object_detection/utils/config_util_test.py  +17  −0
research/object_detection/utils/context_manager.py  +40  −0
research/object_detection/utils/context_manager_test.py  +33  −0
research/object_detection/utils/dataset_util.py  +6  −1
research/object_detection/utils/ops.py  +14  −16
research/object_detection/utils/ops_test.py  +7  −0
research/object_detection/utils/variables_helper.py  +1  −1
research/object_detection/models/faster_rcnn_pnas_feature_extractor_test.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_pnas_feature_extractor."""
import tensorflow as tf

from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas


class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase):

  def _build_feature_extractor(self, first_stage_features_stride):
    return frcnn_pnas.FasterRCNNPNASFeatureExtractor(
        is_training=False,
        first_stage_features_stride=first_stage_features_stride,
        batch_norm_trainable=False,
        reuse_weights=None,
        weight_decay=0.0)

  def test_extract_proposal_features_returns_expected_size(self):
    feature_extractor = self._build_feature_extractor(
        first_stage_features_stride=16)
    preprocessed_inputs = tf.random_uniform(
        [1, 299, 299, 3], maxval=255, dtype=tf.float32)
    rpn_feature_map, _ = feature_extractor.extract_proposal_features(
        preprocessed_inputs, scope='TestScope')
    features_shape = tf.shape(rpn_feature_map)

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      features_shape_out = sess.run(features_shape)
      self.assertAllEqual(features_shape_out, [1, 19, 19, 4320])

  def test_extract_proposal_features_input_size_224(self):
    feature_extractor = self._build_feature_extractor(
        first_stage_features_stride=16)
    preprocessed_inputs = tf.random_uniform(
        [1, 224, 224, 3], maxval=255, dtype=tf.float32)
    rpn_feature_map, _ = feature_extractor.extract_proposal_features(
        preprocessed_inputs, scope='TestScope')
    features_shape = tf.shape(rpn_feature_map)

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      features_shape_out = sess.run(features_shape)
      self.assertAllEqual(features_shape_out, [1, 14, 14, 4320])

  def test_extract_proposal_features_input_size_112(self):
    feature_extractor = self._build_feature_extractor(
        first_stage_features_stride=16)
    preprocessed_inputs = tf.random_uniform(
        [1, 112, 112, 3], maxval=255, dtype=tf.float32)
    rpn_feature_map, _ = feature_extractor.extract_proposal_features(
        preprocessed_inputs, scope='TestScope')
    features_shape = tf.shape(rpn_feature_map)

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      features_shape_out = sess.run(features_shape)
      self.assertAllEqual(features_shape_out, [1, 7, 7, 4320])

  def test_extract_proposal_features_dies_on_invalid_stride(self):
    with self.assertRaises(ValueError):
      self._build_feature_extractor(first_stage_features_stride=99)

  def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
    feature_extractor = self._build_feature_extractor(
        first_stage_features_stride=16)
    preprocessed_inputs = tf.random_uniform(
        [224, 224, 3], maxval=255, dtype=tf.float32)
    with self.assertRaises(ValueError):
      feature_extractor.extract_proposal_features(
          preprocessed_inputs, scope='TestScope')

  def test_extract_box_classifier_features_returns_expected_size(self):
    feature_extractor = self._build_feature_extractor(
        first_stage_features_stride=16)
    proposal_feature_maps = tf.random_uniform(
        [2, 17, 17, 1088], maxval=255, dtype=tf.float32)
    proposal_classifier_features = (
        feature_extractor.extract_box_classifier_features(
            proposal_feature_maps, scope='TestScope'))
    features_shape = tf.shape(proposal_classifier_features)

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      features_shape_out = sess.run(features_shape)
      self.assertAllEqual(features_shape_out, [2, 9, 9, 4320])

  def test_filter_scaling_computation(self):
    expected_filter_scaling = {
        ((4, 8), 2): 1.0,
        ((4, 8), 7): 2.0,
        ((4, 8), 8): 2.0,
        ((4, 8), 9): 4.0
    }
    for args, filter_scaling in expected_filter_scaling.items():
      reduction_indices, start_cell_num = args
      self.assertAlmostEqual(
          frcnn_pnas._filter_scaling(reduction_indices, start_cell_num),
          filter_scaling)


if __name__ == '__main__':
  tf.test.main()
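The expected values in test_filter_scaling_computation follow a simple pattern. A hedged sketch that reproduces them (the real logic lives in frcnn_pnas._filter_scaling; this is only a reading of the test, not the library's code):

def filter_scaling_sketch(reduction_indices, start_cell_num):
  # Assumption: the filter count doubles once per reduction cell that comes
  # before start_cell_num; this matches all four expectations in the test.
  scaling = 1.0
  for ind in reduction_indices:
    if ind < start_cell_num:
      scaling *= 2.0
  return scaling

assert filter_scaling_sketch((4, 8), 2) == 1.0  # no reduction cells passed
assert filter_scaling_sketch((4, 8), 8) == 2.0  # only cell 4 precedes 8
assert filter_scaling_sketch((4, 8), 9) == 4.0  # both cells precede 9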
research/object_detection/models/ssd_inception_v2_feature_extractor.py

@@ -37,7 +37,8 @@ class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """InceptionV2 Feature Extractor for SSD Models.

     Args:
@@ -55,11 +56,16 @@ class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
       use_explicit_padding: Whether to use explicit padding when extracting
         features. Default is False.
       use_depthwise: Whether to use depthwise convolutions. Default is False.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.
     """
     super(SSDInceptionV2FeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, batch_norm_trainable, reuse_weights,
-        use_explicit_padding, use_depthwise)
+        use_explicit_padding, use_depthwise, inplace_batchnorm_update)

   def preprocess(self, resized_inputs):
     """SSD preprocessing.
@@ -76,7 +82,7 @@ class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
     """
     return (2.0 / 255.0) * resized_inputs - 1.0

-  def extract_features(self, preprocessed_inputs):
+  def _extract_features(self, preprocessed_inputs):
     """Extract features from preprocessed inputs.

     Args:
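The inplace_batchnorm_update docstring added above describes the standard TF 1.x idiom for the False case. A minimal sketch, with an illustrative one-layer graph standing in for the detection model:

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 8])
net = tf.layers.batch_normalization(inputs, training=True)
loss = tf.reduce_mean(tf.square(net))

# Batch norm's moving mean/variance updates live in the UPDATE_OPS collection;
# making the train op depend on them ensures they run on every step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)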
research/object_detection/models/ssd_inception_v3_feature_extractor.py

@@ -37,7 +37,8 @@ class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """InceptionV3 Feature Extractor for SSD Models.

     Args:
@@ -55,11 +56,16 @@ class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
       use_explicit_padding: Whether to use explicit padding when extracting
         features. Default is False.
       use_depthwise: Whether to use depthwise convolutions. Default is False.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.
     """
     super(SSDInceptionV3FeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, batch_norm_trainable, reuse_weights,
-        use_explicit_padding, use_depthwise)
+        use_explicit_padding, use_depthwise, inplace_batchnorm_update)

   def preprocess(self, resized_inputs):
     """SSD preprocessing.
@@ -76,7 +82,7 @@ class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
     """
     return (2.0 / 255.0) * resized_inputs - 1.0

-  def extract_features(self, preprocessed_inputs):
+  def _extract_features(self, preprocessed_inputs):
     """Extract features from preprocessed inputs.

     Args:
research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py

@@ -38,7 +38,8 @@ class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """MobileNetV1 Feature Extractor for SSD Models.

     Args:
@@ -57,11 +58,16 @@ class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
         inputs so that the output dimensions are the same as if 'SAME' padding
         were used.
       use_depthwise: Whether to use depthwise convolutions. Default is False.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.
     """
     super(SSDMobileNetV1FeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, batch_norm_trainable, reuse_weights,
-        use_explicit_padding, use_depthwise)
+        use_explicit_padding, use_depthwise, inplace_batchnorm_update)

   def preprocess(self, resized_inputs):
     """SSD preprocessing.
@@ -78,7 +84,7 @@ class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
     """
     return (2.0 / 255.0) * resized_inputs - 1.0

-  def extract_features(self, preprocessed_inputs):
+  def _extract_features(self, preprocessed_inputs):
     """Extract features from preprocessed inputs.

     Args:
research/object_detection/models/ssd_mobilenet_v2_feature_extractor.py

@@ -39,7 +39,8 @@ class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """MobileNetV2 Feature Extractor for SSD Models.

     Mobilenet v2 (experimental), designed by sandler@. More details can be found
@@ -60,11 +61,16 @@ class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
       use_explicit_padding: Whether to use explicit padding when extracting
         features. Default is False.
       use_depthwise: Whether to use depthwise convolutions. Default is False.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.
     """
     super(SSDMobileNetV2FeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, batch_norm_trainable, reuse_weights,
-        use_explicit_padding, use_depthwise)
+        use_explicit_padding, use_depthwise, inplace_batchnorm_update)

   def preprocess(self, resized_inputs):
     """SSD preprocessing.
@@ -81,7 +87,7 @@ class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
     """
     return (2.0 / 255.0) * resized_inputs - 1.0

-  def extract_features(self, preprocessed_inputs):
+  def _extract_features(self, preprocessed_inputs):
     """Extract features from preprocessed inputs.

     Args:
research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py

@@ -43,7 +43,8 @@ class _SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """SSD FPN feature extractor based on Resnet v1 architecture.

     Args:
@@ -66,6 +67,11 @@ class _SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
       use_explicit_padding: Whether to use explicit padding when extracting
         features. Default is False. UNUSED currently.
       use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.

     Raises:
       ValueError: On supplying invalid arguments for unused arguments.
@@ -73,7 +79,7 @@ class _SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
     super(_SSDResnetV1FpnFeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, batch_norm_trainable, reuse_weights,
-        use_explicit_padding)
+        use_explicit_padding, inplace_batchnorm_update)
     if self._depth_multiplier != 1.0:
       raise ValueError('Only depth 1.0 is supported, found: {}'.
                        format(self._depth_multiplier))
@@ -110,7 +116,7 @@ class _SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
         filtered_image_features[feature_name] = feature
     return filtered_image_features

-  def extract_features(self, preprocessed_inputs):
+  def _extract_features(self, preprocessed_inputs):
     """Extract features from preprocessed inputs.

     Args:
@@ -176,7 +182,8 @@ class SSDResnet50V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """Resnet50 v1 FPN Feature Extractor for SSD Models.

     Args:
@@ -194,11 +201,17 @@ class SSDResnet50V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
       use_explicit_padding: Whether to use explicit padding when extracting
         features. Default is False. UNUSED currently.
       use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.
     """
     super(SSDResnet50V1FpnFeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, resnet_v1.resnet_v1_50, 'resnet_v1_50', 'fpn',
-        batch_norm_trainable, reuse_weights, use_explicit_padding)
+        batch_norm_trainable, reuse_weights, use_explicit_padding,
+        inplace_batchnorm_update)


 class SSDResnet101V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
@@ -212,7 +225,8 @@ class SSDResnet101V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """Resnet101 v1 FPN Feature Extractor for SSD Models.

     Args:
@@ -230,11 +244,17 @@ class SSDResnet101V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
       use_explicit_padding: Whether to use explicit padding when extracting
         features. Default is False. UNUSED currently.
       use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.
     """
     super(SSDResnet101V1FpnFeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, resnet_v1.resnet_v1_101, 'resnet_v1_101', 'fpn',
-        batch_norm_trainable, reuse_weights, use_explicit_padding)
+        batch_norm_trainable, reuse_weights, use_explicit_padding,
+        inplace_batchnorm_update)


 class SSDResnet152V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
@@ -248,7 +268,8 @@ class SSDResnet152V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
                batch_norm_trainable=True,
                reuse_weights=None,
                use_explicit_padding=False,
-               use_depthwise=False):
+               use_depthwise=False,
+               inplace_batchnorm_update=False):
     """Resnet152 v1 FPN Feature Extractor for SSD Models.

     Args:
@@ -266,8 +287,14 @@ class SSDResnet152V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
       use_explicit_padding: Whether to use explicit padding when extracting
         features. Default is False. UNUSED currently.
       use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
+      inplace_batchnorm_update: Whether to update batch_norm inplace during
+        training. This is required for batch norm to work correctly on TPUs.
+        When this is false, user must add a control dependency on
+        tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
+        norm moving average parameters.
     """
     super(SSDResnet152V1FpnFeatureExtractor, self).__init__(
         is_training, depth_multiplier, min_depth, pad_to_multiple,
         conv_hyperparams, resnet_v1.resnet_v1_152, 'resnet_v1_152', 'fpn',
-        batch_norm_trainable, reuse_weights, use_explicit_padding)
+        batch_norm_trainable, reuse_weights, use_explicit_padding,
+        inplace_batchnorm_update)
research/object_detection/protos/eval.proto

@@ -68,4 +68,8 @@ message EvalConfig {
   // Whether to keep image identifier in filename when exported to
   // visualization_export_dir.
   optional bool keep_image_id_for_visualization_export = 19 [default = false];
+
+  // Whether to retain original images (i.e. not pre-processed) in the tensor
+  // dictionary, so that they can be displayed in Tensorboard.
+  optional bool retain_original_images = 23 [default = true];
 }
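Assuming the generated eval_pb2 bindings, the new field can be read and overridden from Python; note it defaults to true here, while the similarly named field added to train.proto below defaults to false:

from object_detection.protos import eval_pb2

eval_config = eval_pb2.EvalConfig()
assert eval_config.retain_original_images   # default = true per the diff above
eval_config.retain_original_images = False  # opt out to save memory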
research/object_detection/protos/faster_rcnn.proto

@@ -131,6 +131,12 @@ message FasterRcnn {
   // to use sigmoid loss and enable merge_multiple_label_boxes.
   // If not specified, Softmax loss is used as default.
   optional ClassificationLoss second_stage_classification_loss = 29;
+
+  // Whether to update batch_norm inplace during training. This is required
+  // for batch norm to work correctly on TPUs. When this is false, user must add
+  // a control dependency on tf.GraphKeys.UPDATE_OPS for train/loss op in order
+  // to update the batch norm moving average parameters.
+  optional bool inplace_batchnorm_update = 30 [default = false];
 }
research/object_detection/protos/input_reader.proto

@@ -51,6 +51,9 @@ message InputReader {
   // Number of reader instances to create.
   optional uint32 num_readers = 6 [default = 32];

+  // Number of records to read from each reader at once.
+  optional uint32 read_block_length = 15 [default = 32];
+
   // Number of decoded records to prefetch before batching.
   optional uint32 prefetch_size = 13 [default = 512];
research/object_detection/protos/ssd.proto

@@ -59,6 +59,12 @@ message Ssd {
   // Loss configuration for training.
   optional Loss loss = 11;
+
+  // Whether to update batch_norm inplace during training. This is required
+  // for batch norm to work correctly on TPUs. When this is false, user must add
+  // a control dependency on tf.GraphKeys.UPDATE_OPS for train/loss op in order
+  // to update the batch norm moving average parameters.
+  optional bool inplace_batchnorm_update = 15 [default = false];
 }
research/object_detection/protos/train.proto

@@ -94,4 +94,9 @@ message TrainConfig {
   // Whether to remove padding along `num_boxes` dimension of the groundtruth
   // tensors.
   optional bool unpad_groundtruth_tensors = 21 [default = true];
+
+  // Whether to retain original images (i.e. not pre-processed) in the tensor
+  // dictionary, so that they can be displayed in Tensorboard. Note that this
+  // will lead to a larger memory footprint.
+  optional bool retain_original_images = 23 [default = false];
 }
research/object_detection/trainer.py

@@ -264,29 +264,6 @@ def train(create_tensor_dict_fn, create_model_fn, train_config, master, task,
           total_num_replicas=worker_replicas)
       sync_optimizer = training_optimizer

-    # Create ops required to initialize the model from a given checkpoint.
-    init_fn = None
-    if train_config.fine_tune_checkpoint:
-      if not train_config.fine_tune_checkpoint_type:
-        # train_config.from_detection_checkpoint field is deprecated. For
-        # backward compatibility, fine_tune_checkpoint_type is set based on
-        # from_detection_checkpoint.
-        if train_config.from_detection_checkpoint:
-          train_config.fine_tune_checkpoint_type = 'detection'
-        else:
-          train_config.fine_tune_checkpoint_type = 'classification'
-      var_map = detection_model.restore_map(
-          fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
-          load_all_detection_checkpoint_vars=(
-              train_config.load_all_detection_checkpoint_vars))
-      available_var_map = (variables_helper.
-                           get_variables_available_in_checkpoint(
-                               var_map, train_config.fine_tune_checkpoint))
-      init_saver = tf.train.Saver(available_var_map)
-
-      def initializer_fn(sess):
-        init_saver.restore(sess, train_config.fine_tune_checkpoint)
-      init_fn = initializer_fn
-
     with tf.device(deploy_config.optimizer_device()):
       regularization_losses = (None if train_config.add_regularization_loss
                                else [])
@@ -354,6 +331,29 @@ def train(create_tensor_dict_fn, create_model_fn, train_config, master, task,
     saver = tf.train.Saver(
         keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)

+    # Create ops required to initialize the model from a given checkpoint.
+    init_fn = None
+    if train_config.fine_tune_checkpoint:
+      if not train_config.fine_tune_checkpoint_type:
+        # train_config.from_detection_checkpoint field is deprecated. For
+        # backward compatibility, fine_tune_checkpoint_type is set based on
+        # from_detection_checkpoint.
+        if train_config.from_detection_checkpoint:
+          train_config.fine_tune_checkpoint_type = 'detection'
+        else:
+          train_config.fine_tune_checkpoint_type = 'classification'
+      var_map = detection_model.restore_map(
+          fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
+          load_all_detection_checkpoint_vars=(
+              train_config.load_all_detection_checkpoint_vars))
+      available_var_map = (variables_helper.
+                           get_variables_available_in_checkpoint(
+                               var_map, train_config.fine_tune_checkpoint))
+      init_saver = tf.train.Saver(available_var_map)
+
+      def initializer_fn(sess):
+        init_saver.restore(sess, train_config.fine_tune_checkpoint)
+      init_fn = initializer_fn
+
     slim.learning.train(
         train_tensor,
         logdir=train_dir,
research/object_detection/utils/config_util.py

@@ -14,10 +14,13 @@
 # ==============================================================================
 """Functions for reading and updating configuration files."""

+import os
 import tensorflow as tf

 from google.protobuf import text_format

+from tensorflow.python.lib.io import file_io
+
 from object_detection.protos import eval_pb2
 from object_detection.protos import input_reader_pb2
 from object_detection.protos import model_pb2
@@ -119,6 +122,24 @@ def create_pipeline_proto_from_configs(configs):
   return pipeline_config


+def save_pipeline_config(pipeline_config, directory):
+  """Saves a pipeline config text file to disk.
+
+  Args:
+    pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
+    directory: The model directory into which the pipeline config file will be
+      saved.
+  """
+  if not file_io.file_exists(directory):
+    file_io.recursive_create_dir(directory)
+  pipeline_config_path = os.path.join(directory, "pipeline.config")
+  config_text = text_format.MessageToString(pipeline_config)
+  with tf.gfile.Open(pipeline_config_path, "wb") as f:
+    tf.logging.info("Writing pipeline config file to %s", pipeline_config_path)
+    f.write(config_text)
+
+
 def get_configs_from_multiple_files(model_config_path="",
                                     train_config_path="",
                                     train_input_config_path="",
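A round-trip sketch of the new helper; the pipeline path and model directory below are placeholders:

import os
from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file('path/to/pipeline.config')
pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_proto, '/tmp/model_dir')
# save_pipeline_config always writes <directory>/pipeline.config, creating the
# directory first if it does not exist.
assert os.path.exists('/tmp/model_dir/pipeline.config')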
research/object_detection/utils/config_util_test.py

@@ -110,6 +110,23 @@ class ConfigUtilTest(tf.test.TestCase):
         config_util.create_pipeline_proto_from_configs(configs))
     self.assertEqual(pipeline_config, pipeline_config_reconstructed)

+  def test_save_pipeline_config(self):
+    """Tests that the pipeline config is properly saved to disk."""
+    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
+    pipeline_config.model.faster_rcnn.num_classes = 10
+    pipeline_config.train_config.batch_size = 32
+    pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
+    pipeline_config.eval_config.num_examples = 20
+    pipeline_config.eval_input_reader.queue_capacity = 100
+
+    config_util.save_pipeline_config(pipeline_config, self.get_temp_dir())
+    configs = config_util.get_configs_from_pipeline_file(
+        os.path.join(self.get_temp_dir(), "pipeline.config"))
+    pipeline_config_reconstructed = (
+        config_util.create_pipeline_proto_from_configs(configs))
+    self.assertEqual(pipeline_config, pipeline_config_reconstructed)
+
   def test_get_configs_from_multiple_files(self):
     """Tests that proto configs can be read from multiple files."""
     temp_dir = self.get_temp_dir()
research/object_detection/utils/context_manager.py (new file, 0 → 100644)

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python context management helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class IdentityContextManager(object):
  """Returns an identity context manager that does nothing.

  This is helpful in setting up conditional `with` statement as below:

  with slim.arg_scope(x) if use_slim_scope else IdentityContextManager():
    do_stuff()
  """

  def __enter__(self):
    return None

  def __exit__(self, exec_type, exec_value, traceback):
    del exec_type
    del exec_value
    del traceback
    return False
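A usage sketch of the conditional `with` pattern from the docstring; the maybe_scope helper is hypothetical, not part of the library:

from object_detection.utils import context_manager

def maybe_scope(scope_fn=None):
  # Hypothetical helper: fall back to the identity manager so call sites can
  # keep a single, unconditional `with` statement.
  return scope_fn() if scope_fn else context_manager.IdentityContextManager()

with maybe_scope() as scope:
  assert scope is None  # __enter__ yields None; __exit__ never swallows errors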
research/object_detection/utils/context_manager_test.py (new file, 0 → 100644)

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.utils.context_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from object_detection.utils import context_manager


class ContextManagerTest(tf.test.TestCase):

  def test_identity_context_manager(self):
    with context_manager.IdentityContextManager() as identity_context:
      self.assertIsNone(identity_context)


if __name__ == '__main__':
  tf.test.main()
research/object_detection/utils/dataset_util.py

@@ -123,11 +123,16 @@ def read_dataset(file_read_func, decode_func, input_files, config):
   if config.shuffle:
     filename_dataset = filename_dataset.shuffle(
         config.filenames_shuffle_buffer_size)
+  elif config.num_readers > 1:
+    tf.logging.warning('`shuffle` is false, but the input data stream is '
+                       'still slightly shuffled since `num_readers` > 1.')
+
   filename_dataset = filename_dataset.repeat(config.num_epochs or None)
   records_dataset = filename_dataset.apply(
       tf.contrib.data.parallel_interleave(
-          file_read_func, cycle_length=config.num_readers, sloppy=True))
+          file_read_func, cycle_length=config.num_readers,
+          block_length=config.read_block_length, sloppy=True))
   if config.shuffle:
     records_dataset.shuffle(config.shuffle_buffer_size)
   tensor_dataset = records_dataset.map(
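The new block_length=config.read_block_length argument (default 32, per the input_reader.proto change above) sets how many consecutive records are drawn from each reader before cycling to the next, which is also why the new warning calls the unshuffled stream "slightly shuffled" whenever num_readers > 1. A toy illustration, assuming TF 1.x and its tf.contrib.data.parallel_interleave:

import tensorflow as tf

# Two "files" whose records are numbered from different bases.
files = tf.data.Dataset.from_tensor_slices(tf.constant([0, 100], tf.int64))
dataset = files.apply(
    tf.contrib.data.parallel_interleave(
        lambda base: tf.data.Dataset.range(4).map(lambda x: base + x),
        cycle_length=2, block_length=2, sloppy=False))
# With sloppy=False the order is deterministic: 0, 1, 100, 101, 2, 3, 102, 103.
# read_dataset passes sloppy=True, trading that determinism for throughput.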
research/object_detection/utils/ops.py

@@ -180,26 +180,24 @@ def pad_to_multiple(tensor, multiple):
   padded_tensor_width = int(
       math.ceil(float(tensor_width) / multiple) * multiple)

-  if (padded_tensor_height == tensor_height and
-      padded_tensor_width == tensor_width):
-    return tensor
-
   if tensor_depth is None:
     tensor_depth = tf.shape(tensor)[3]

   # Use tf.concat instead of tf.pad to preserve static shape
-  height_pad = tf.zeros([
-      batch_size, padded_tensor_height - tensor_height, tensor_width,
-      tensor_depth
-  ])
-  padded_tensor = tf.concat([tensor, height_pad], 1)
-  width_pad = tf.zeros([
-      batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
-      tensor_depth
-  ])
-  padded_tensor = tf.concat([padded_tensor, width_pad], 2)
-  return padded_tensor
+  if padded_tensor_height != tensor_height:
+    height_pad = tf.zeros([
+        batch_size, padded_tensor_height - tensor_height, tensor_width,
+        tensor_depth
+    ])
+    tensor = tf.concat([tensor, height_pad], 1)
+  if padded_tensor_width != tensor_width:
+    width_pad = tf.zeros([
+        batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
+        tensor_depth
+    ])
+    tensor = tf.concat([tensor, width_pad], 2)
+  return tensor


 def padded_one_hot_encoding(indices, depth, left_pad):
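For reference, the padded-size arithmetic behind the new per-dimension checks, with illustrative values:

import math

def padded_size(size, multiple):
  # Mirrors the int(math.ceil(float(size) / multiple) * multiple) expression.
  return int(math.ceil(float(size) / multiple) * multiple)

assert padded_size(2, 4) == 4  # not a multiple: a zero pad gets concatenated
assert padded_size(8, 4) == 8  # already a multiple: that dimension is skipped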
research/object_detection/utils/ops_test.py

@@ -136,6 +136,13 @@ class OpsTestPadToMultiple(tf.test.TestCase):
       padded_tensor_out = sess.run(padded_tensor)
     self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)

+  def test_non_square_padding(self):
+    tensor = tf.constant([[[[0.], [0.]]]])
+    padded_tensor = ops.pad_to_multiple(tensor, 2)
+    with self.test_session() as sess:
+      padded_tensor_out = sess.run(padded_tensor)
+    self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)
+
   def test_padding(self):
     tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
     padded_tensor = ops.pad_to_multiple(tensor, 4)
View file @
abd50423
...
@@ -42,7 +42,7 @@ def filter_variables(variables, filter_regex_list, invert=False):
...
@@ -42,7 +42,7 @@ def filter_variables(variables, filter_regex_list, invert=False):
a list of filtered variables.
a list of filtered variables.
"""
"""
kept_vars
=
[]
kept_vars
=
[]
variables_to_ignore_patterns
=
filter
(
None
,
filter_regex_list
)
variables_to_ignore_patterns
=
list
(
filter
(
None
,
filter_regex_list
)
)
for
var
in
variables
:
for
var
in
variables
:
add
=
True
add
=
True
for
pattern
in
variables_to_ignore_patterns
:
for
pattern
in
variables_to_ignore_patterns
:
...
...
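Wrapping filter in list matters under Python 3, where filter returns a one-shot iterator: without the wrapper, the inner loop would exhaust the patterns while processing the first variable and silently keep every later one. A minimal demonstration:

patterns = filter(None, ['conv.*', '', 'bn.*'])
first_pass = list(patterns)   # ['conv.*', 'bn.*']
second_pass = list(patterns)  # [] under Python 3: the iterator is spent
assert second_pass == []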