ModelZoo / ResNet50_tensorflow

Commit 44fa1d37, authored Jun 29, 2017 by Alex Lee

    Merge remote-tracking branch 'upstream/master'

Parents: d3628a74, 6e367f67

Showing 20 changed files (of 296 changed in this merge) with 3673 additions and 0 deletions (+3673, -0).
object_detection/builders/box_coder_builder_test.py          +107  -0
object_detection/builders/box_predictor_builder.py           +106  -0
object_detection/builders/box_predictor_builder_test.py      +391  -0
object_detection/builders/hyperparams_builder.py             +169  -0
object_detection/builders/hyperparams_builder_test.py        +450  -0
object_detection/builders/image_resizer_builder.py           +62   -0
object_detection/builders/image_resizer_builder_test.py      +70   -0
object_detection/builders/input_reader_builder.py            +65   -0
object_detection/builders/input_reader_builder_test.py       +92   -0
object_detection/builders/losses_builder.py                  +161  -0
object_detection/builders/losses_builder_test.py             +323  -0
object_detection/builders/matcher_builder.py                 +51   -0
object_detection/builders/matcher_builder_test.py            +97   -0
object_detection/builders/model_builder.py                   +303  -0
object_detection/builders/model_builder_test.py              +456  -0
object_detection/builders/optimizer_builder.py               +112  -0
object_detection/builders/optimizer_builder_test.py          +197  -0
object_detection/builders/post_processing_builder.py         +111  -0
object_detection/builders/post_processing_builder_test.py    +73   -0
object_detection/builders/preprocessor_builder.py            +277  -0
object_detection/builders/box_coder_builder_test.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_coder_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2


class BoxCoderBuilderTest(tf.test.TestCase):

  def test_build_faster_rcnn_box_coder_with_defaults(self):
    box_coder_text_proto = """
      faster_rcnn_box_coder {
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(isinstance(box_coder_object,
                               faster_rcnn_box_coder.FasterRcnnBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])

  def test_build_faster_rcnn_box_coder_with_non_default_parameters(self):
    box_coder_text_proto = """
      faster_rcnn_box_coder {
        y_scale: 6.0
        x_scale: 3.0
        height_scale: 7.0
        width_scale: 8.0
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(isinstance(box_coder_object,
                               faster_rcnn_box_coder.FasterRcnnBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])

  def test_build_mean_stddev_box_coder(self):
    box_coder_text_proto = """
      mean_stddev_box_coder {
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(
        isinstance(box_coder_object, mean_stddev_box_coder.MeanStddevBoxCoder))

  def test_build_square_box_coder_with_defaults(self):
    box_coder_text_proto = """
      square_box_coder {
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(
        isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0])

  def test_build_square_box_coder_with_non_default_parameters(self):
    box_coder_text_proto = """
      square_box_coder {
        y_scale: 6.0
        x_scale: 3.0
        length_scale: 7.0
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(
        isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0])

  def test_raise_error_on_empty_box_coder(self):
    box_coder_text_proto = """
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    with self.assertRaises(ValueError):
      box_coder_builder.build(box_coder_proto)


if __name__ == '__main__':
  tf.test.main()
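Taken together, these tests document the builder's contract: box_coder_builder.build consumes a box_coder_pb2.BoxCoder message and returns the matching coder object. A minimal standalone sketch, not part of this commit (the one-line config literal is illustrative):

from google.protobuf import text_format

from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2

# Parse a text-format BoxCoder config, then build the coder it selects.
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge('faster_rcnn_box_coder { y_scale: 6.0 }', box_coder_proto)
box_coder = box_coder_builder.build(box_coder_proto)
# -> a faster_rcnn_box_coder.FasterRcnnBoxCoder with the overridden y_scale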
object_detection/builders/box_predictor_builder.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
from object_detection.core import box_predictor
from object_detection.protos import box_predictor_pb2


def build(argscope_fn, box_predictor_config, is_training, num_classes):
  """Builds box predictor based on the configuration.

  Builds box predictor based on the configuration. See box_predictor.proto for
  configurable options. Also, see box_predictor.py for more details.

  Args:
    argscope_fn: A function that takes the following inputs:
        * hyperparams_pb2.Hyperparams proto
        * a boolean indicating if the model is in training mode.
      and returns a tf slim argscope for Conv and FC hyperparameters.
    box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
      configuration.
    is_training: Whether the model is in training mode.
    num_classes: Number of classes to predict.

  Returns:
    box_predictor: box_predictor.BoxPredictor object.

  Raises:
    ValueError: On unknown box predictor.
  """
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')

  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')

  if box_predictor_oneof == 'convolutional_box_predictor':
    conv_box_predictor = box_predictor_config.convolutional_box_predictor
    conv_hyperparams = argscope_fn(conv_box_predictor.conv_hyperparams,
                                   is_training)
    box_predictor_object = box_predictor.ConvolutionalBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        min_depth=conv_box_predictor.min_depth,
        max_depth=conv_box_predictor.max_depth,
        num_layers_before_predictor=(
            conv_box_predictor.num_layers_before_predictor),
        use_dropout=conv_box_predictor.use_dropout,
        dropout_keep_prob=conv_box_predictor.dropout_keep_probability,
        kernel_size=conv_box_predictor.kernel_size,
        box_code_size=conv_box_predictor.box_code_size,
        apply_sigmoid_to_scores=conv_box_predictor.apply_sigmoid_to_scores)
    return box_predictor_object

  if box_predictor_oneof == 'mask_rcnn_box_predictor':
    mask_rcnn_box_predictor = box_predictor_config.mask_rcnn_box_predictor
    fc_hyperparams = argscope_fn(mask_rcnn_box_predictor.fc_hyperparams,
                                 is_training)
    conv_hyperparams = None
    if mask_rcnn_box_predictor.HasField('conv_hyperparams'):
      conv_hyperparams = argscope_fn(mask_rcnn_box_predictor.conv_hyperparams,
                                     is_training)
    box_predictor_object = box_predictor.MaskRCNNBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        fc_hyperparams=fc_hyperparams,
        use_dropout=mask_rcnn_box_predictor.use_dropout,
        dropout_keep_prob=mask_rcnn_box_predictor.dropout_keep_probability,
        box_code_size=mask_rcnn_box_predictor.box_code_size,
        conv_hyperparams=conv_hyperparams,
        predict_instance_masks=mask_rcnn_box_predictor.predict_instance_masks,
        mask_prediction_conv_depth=(
            mask_rcnn_box_predictor.mask_prediction_conv_depth),
        predict_keypoints=mask_rcnn_box_predictor.predict_keypoints)
    return box_predictor_object

  if box_predictor_oneof == 'rfcn_box_predictor':
    rfcn_box_predictor = box_predictor_config.rfcn_box_predictor
    conv_hyperparams = argscope_fn(rfcn_box_predictor.conv_hyperparams,
                                   is_training)
    box_predictor_object = box_predictor.RfcnBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        crop_size=[rfcn_box_predictor.crop_height,
                   rfcn_box_predictor.crop_width],
        num_spatial_bins=[rfcn_box_predictor.num_spatial_bins_height,
                          rfcn_box_predictor.num_spatial_bins_width],
        depth=rfcn_box_predictor.depth,
        box_code_size=rfcn_box_predictor.box_code_size)
    return box_predictor_object

  raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
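As the docstring above notes, build is parameterized by an argscope_fn rather than calling a hyperparams builder directly; in non-test code the natural choice is hyperparams_builder.build, which the tests in the next file use as well. A minimal sketch, not part of this commit (the config literal is illustrative):

from google.protobuf import text_format

from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2

config = box_predictor_pb2.BoxPredictor()
text_format.Merge("""
  convolutional_box_predictor {
    conv_hyperparams {
      regularizer { l2_regularizer { } }
      initializer { truncated_normal_initializer { } }
    }
  }
""", config)
# hyperparams_builder.build turns the embedded conv_hyperparams proto into a
# tf-slim arg_scope for the predictor's convolutional layers.
predictor = box_predictor_builder.build(
    argscope_fn=hyperparams_builder.build,
    box_predictor_config=config,
    is_training=True,
    num_classes=90)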
object_detection/builders/box_predictor_builder_test.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import mock
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2


class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):

  def test_box_predictor_calls_conv_argscope_fn(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
    self.assertAlmostEqual(
        (hyperparams_proto.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         stddev))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_construct_non_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      convolutional_box_predictor {
        min_depth: 2
        max_depth: 16
        num_layers_before_predictor: 2
        use_dropout: false
        dropout_keep_probability: 0.4
        kernel_size: 3
        box_code_size: 3
        apply_sigmoid_to_scores: true
      }
    """
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    self.assertEqual(box_predictor._min_depth, 2)
    self.assertEqual(box_predictor._max_depth, 16)
    self.assertEqual(box_predictor._num_layers_before_predictor, 2)
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.4)
    self.assertTrue(box_predictor._apply_sigmoid_to_scores)
    self.assertEqual(box_predictor.num_classes, 10)
    self.assertFalse(box_predictor._is_training)

  def test_construct_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer {
            l1_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
      }"""
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=hyperparams_builder.build,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor._min_depth, 0)
    self.assertEqual(box_predictor._max_depth, 0)
    self.assertEqual(box_predictor._num_layers_before_predictor, 0)
    self.assertTrue(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
    self.assertFalse(box_predictor._apply_sigmoid_to_scores)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)


class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):

  def test_box_predictor_builder_calls_fc_argscope_fn(self):
    fc_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
      op: FC
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    mock_argscope_fn.assert_called_with(hyperparams_proto, False)
    self.assertEqual(box_predictor._fc_hyperparams, 'arg_scope')

  def test_non_default_mask_rcnn_box_predictor(self):
    fc_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
      op: FC
    """
    box_predictor_text_proto = """
      mask_rcnn_box_predictor {
        use_dropout: true
        dropout_keep_probability: 0.8
        box_code_size: 3
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
    def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
      return (fc_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_fc_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertTrue(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 3)

  def test_build_default_mask_rcnn_box_predictor(self):
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock.Mock(return_value='arg_scope'),
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertFalse(box_predictor._predict_instance_masks)
    self.assertFalse(box_predictor._predict_keypoints)

  def test_build_box_predictor_with_mask_branch(self):
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
        hyperparams_pb2.Hyperparams.CONV)
    box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
    box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    mock_argscope_fn.assert_has_calls(
        [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
                   True),
         mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
                   True)], any_order=True)
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertTrue(box_predictor._predict_instance_masks)
    self.assertEqual(box_predictor._mask_prediction_conv_depth, 512)
    self.assertFalse(box_predictor._predict_keypoints)


class RfcnBoxPredictorBuilderTest(tf.test.TestCase):

  def test_box_predictor_calls_fc_argscope_fn(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
    self.assertAlmostEqual(
        (hyperparams_proto.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         stddev))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_non_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    box_predictor_text_proto = """
      rfcn_box_predictor {
        num_spatial_bins_height: 4
        num_spatial_bins_width: 4
        depth: 4
        box_code_size: 3
        crop_height: 16
        crop_width: 16
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 3)
    self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
    self.assertEqual(box_predictor._crop_size, [16, 16])

  def test_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
    self.assertEqual(box_predictor._crop_size, [12, 12])


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/hyperparams_builder.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow as tf

from object_detection.protos import hyperparams_pb2

slim = tf.contrib.slim


def build(hyperparams_config, is_training):
  """Builds tf-slim arg_scope for convolution ops based on the config.

  Returns an arg_scope to use for convolution ops containing weights
  initializer, weights regularizer, activation function, batch norm function
  and batch norm parameters based on the configuration.

  Note that if the batch_norm parameters are not specified in the config
  (i.e. left to default) then batch norm is excluded from the arg_scope.

  The batch norm parameters are set for updates based on `is_training` argument
  and conv_hyperparams_config.batch_norm.train parameter. During training, they
  are updated only if batch_norm.train parameter is true. However, during eval,
  no updates are made to the batch norm variables. In both cases, their current
  values are used during forward pass.

  Args:
    hyperparams_config: hyperparams.proto object containing
      hyperparameters.
    is_training: Whether the network is in training mode.

  Returns:
    arg_scope: tf-slim arg_scope containing hyperparameters for ops.

  Raises:
    ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
  """
  if not isinstance(hyperparams_config,
                    hyperparams_pb2.Hyperparams):
    raise ValueError('hyperparams_config not of type '
                     'hyperparams_pb.Hyperparams.')

  batch_norm = None
  batch_norm_params = None
  if hyperparams_config.HasField('batch_norm'):
    batch_norm = slim.batch_norm
    batch_norm_params = _build_batch_norm_params(
        hyperparams_config.batch_norm, is_training)

  affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
  if hyperparams_config.HasField('op') and (
      hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
    affected_ops = [slim.fully_connected]
  with slim.arg_scope(
      affected_ops,
      weights_regularizer=_build_regularizer(hyperparams_config.regularizer),
      weights_initializer=_build_initializer(hyperparams_config.initializer),
      activation_fn=_build_activation_fn(hyperparams_config.activation),
      normalizer_fn=batch_norm,
      normalizer_params=batch_norm_params) as sc:
    return sc


def _build_activation_fn(activation_fn):
  """Builds a callable activation from config.

  Args:
    activation_fn: hyperparams_pb2.Hyperparams.activation

  Returns:
    Callable activation function.

  Raises:
    ValueError: On unknown activation function.
  """
  if activation_fn == hyperparams_pb2.Hyperparams.NONE:
    return None
  if activation_fn == hyperparams_pb2.Hyperparams.RELU:
    return tf.nn.relu
  if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
    return tf.nn.relu6
  raise ValueError('Unknown activation function: {}'.format(activation_fn))


def _build_regularizer(regularizer):
  """Builds a tf-slim regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    tf-slim regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
  if regularizer_oneof == 'l1_regularizer':
    return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
  if regularizer_oneof == 'l2_regularizer':
    return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
  raise ValueError('Unknown regularizer function: {}'.format(
      regularizer_oneof))


def _build_initializer(initializer):
  """Build a tf initializer from config.

  Args:
    initializer: hyperparams_pb2.Hyperparams.initializer proto.

  Returns:
    tf initializer.

  Raises:
    ValueError: On unknown initializer.
  """
  initializer_oneof = initializer.WhichOneof('initializer_oneof')
  if initializer_oneof == 'truncated_normal_initializer':
    return tf.truncated_normal_initializer(
        mean=initializer.truncated_normal_initializer.mean,
        stddev=initializer.truncated_normal_initializer.stddev)
  if initializer_oneof == 'variance_scaling_initializer':
    enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
                       DESCRIPTOR.enum_types_by_name['Mode'])
    mode = enum_descriptor.values_by_number[initializer.
                                            variance_scaling_initializer.
                                            mode].name
    return slim.variance_scaling_initializer(
        factor=initializer.variance_scaling_initializer.factor,
        mode=mode,
        uniform=initializer.variance_scaling_initializer.uniform)
  raise ValueError('Unknown initializer function: {}'.format(
      initializer_oneof))


def _build_batch_norm_params(batch_norm, is_training):
  """Build a dictionary of batch_norm params from config.

  Args:
    batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.

  Returns:
    A dictionary containing batch_norm parameters.
  """
  batch_norm_params = {
      'decay': batch_norm.decay,
      'center': batch_norm.center,
      'scale': batch_norm.scale,
      'epsilon': batch_norm.epsilon,
      'fused': True,
      'is_training': is_training and batch_norm.train,
  }
  return batch_norm_params
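The returned value is the tf-slim arg_scope mapping itself, so callers re-enter it with slim.arg_scope before creating layers. A minimal sketch, not part of this commit (the weight, stddev, and layer sizes are illustrative):

import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

slim = tf.contrib.slim

config = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 } }
  activation: RELU_6
""", config)

# Conv layers created under the re-entered scope pick up the configured
# regularizer, initializer, and activation automatically.
with slim.arg_scope(hyperparams_builder.build(config, is_training=True)):
  net = slim.conv2d(tf.zeros([1, 32, 32, 3]), 8, [3, 3])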
object_detection/builders/hyperparams_builder_test.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import numpy as np
import tensorflow as tf

from google.protobuf import text_format

# TODO: Rewrite third_party imports.
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

slim = tf.contrib.slim


class HyperparamsBuilderTest(tf.test.TestCase):

  # TODO: Make this a public api in slim arg_scope.py.
  def _get_scope_key(self, op):
    return getattr(op, '_key_op', str(op))

  def test_default_arg_scope_has_conv2d_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.conv2d) in scope)

  def test_default_arg_scope_has_separable_conv2d_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.separable_conv2d) in scope)

  def test_default_arg_scope_has_conv2d_transpose_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.conv2d_transpose) in scope)

  def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
    conv_hyperparams_text_proto = """
      op: FC
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.fully_connected) in scope)

  def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    kwargs_1, kwargs_2, kwargs_3 = scope.values()
    self.assertDictEqual(kwargs_1, kwargs_2)
    self.assertDictEqual(kwargs_1, kwargs_3)

  def test_return_l1_regularized_weights(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.5
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    regularizer = conv_scope_arguments['weights_regularizer']
    weights = np.array([1., -1, 4., 2.])
    with self.test_session() as sess:
      result = sess.run(regularizer(tf.constant(weights)))
    self.assertAllClose(np.abs(weights).sum() * 0.5, result)

  def test_return_l2_regularizer_weights(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
          weight: 0.42
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    regularizer = conv_scope_arguments['weights_regularizer']
    weights = np.array([1., -1, 4., 2.])
    with self.test_session() as sess:
      result = sess.run(regularizer(tf.constant(weights)))
    self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)

  def test_return_non_default_batch_norm_params_with_train_during_train(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      batch_norm {
        decay: 0.7
        center: false
        scale: true
        epsilon: 0.03
        train: true
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
    batch_norm_params = conv_scope_arguments['normalizer_params']
    self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
    self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
    self.assertFalse(batch_norm_params['center'])
    self.assertTrue(batch_norm_params['scale'])
    self.assertTrue(batch_norm_params['is_training'])

  def test_return_batch_norm_params_with_notrain_during_eval(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      batch_norm {
        decay: 0.7
        center: false
        scale: true
        epsilon: 0.03
        train: true
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=False)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
    batch_norm_params = conv_scope_arguments['normalizer_params']
    self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
    self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
    self.assertFalse(batch_norm_params['center'])
    self.assertTrue(batch_norm_params['scale'])
    self.assertFalse(batch_norm_params['is_training'])

  def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      batch_norm {
        decay: 0.7
        center: false
        scale: true
        epsilon: 0.03
        train: false
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
    batch_norm_params = conv_scope_arguments['normalizer_params']
    self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
    self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
    self.assertFalse(batch_norm_params['center'])
    self.assertTrue(batch_norm_params['scale'])
    self.assertFalse(batch_norm_params['is_training'])

  def test_do_not_use_batch_norm_if_default(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], None)
    self.assertEqual(conv_scope_arguments['normalizer_params'], None)

  def test_use_none_activation(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: NONE
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['activation_fn'], None)

  def test_use_relu_activation(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)

  def test_use_relu_6_activation(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)

  def _assert_variance_in_range(self, initializer, shape, variance,
                                tol=1e-2):
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        var = tf.get_variable(
            name='test',
            shape=shape,
            dtype=tf.float32,
            initializer=initializer)
        sess.run(tf.global_variables_initializer())
        values = sess.run(var)
        self.assertAllClose(np.var(values), variance, tol, tol)

  def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_IN
          uniform: false
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=2. / 100.)

  def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_OUT
          uniform: false
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=2. / 40.)

  def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_AVG
          uniform: false
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=4. / (100. + 40.))

  def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_IN
          uniform: true
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=2. / 100.)

  def test_variance_in_range_with_truncated_normal_initializer(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.8
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=0.49, tol=1e-1)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/image_resizer_builder.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2


def build(image_resizer_config):
  """Builds callable for image resizing operations.

  Args:
    image_resizer_config: image_resizer.proto object containing parameters for
      an image resizing operation.

  Returns:
    image_resizer_fn: Callable for image resizing. This callable always takes
      a rank-3 image tensor (corresponding to a single image) and returns a
      rank-3 image tensor, possibly with new spatial dimensions.

  Raises:
    ValueError: if `image_resizer_config` is of incorrect type.
    ValueError: if `image_resizer_config.image_resizer_oneof` is not of the
      expected type.
    ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
      is used.
  """
  if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
    raise ValueError('image_resizer_config not of type '
                     'image_resizer_pb2.ImageResizer.')

  if image_resizer_config.WhichOneof(
      'image_resizer_oneof') == 'keep_aspect_ratio_resizer':
    keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
    if not (keep_aspect_ratio_config.min_dimension
            <= keep_aspect_ratio_config.max_dimension):
      raise ValueError('min_dimension > max_dimension')
    return functools.partial(
        preprocessor.resize_to_range,
        min_dimension=keep_aspect_ratio_config.min_dimension,
        max_dimension=keep_aspect_ratio_config.max_dimension)
  if image_resizer_config.WhichOneof(
      'image_resizer_oneof') == 'fixed_shape_resizer':
    fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
    return functools.partial(preprocessor.resize_image,
                             new_height=fixed_shape_resizer_config.height,
                             new_width=fixed_shape_resizer_config.width)
  raise ValueError('Invalid image resizer option.')
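The builder returns a plain callable via functools.partial, so it can be applied directly to an image tensor. A minimal sketch, not part of this commit (the 600/1024 bounds and the input shape are illustrative):

import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2

config = image_resizer_pb2.ImageResizer()
text_format.Merge("""
  keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 }
""", config)
resize_fn = image_resizer_builder.build(config)
# Resizes a rank-3 image tensor, preserving aspect ratio, so its spatial
# dimensions fall within the configured [min_dimension, max_dimension] range.
resized_image = resize_fn(tf.zeros([480, 640, 3]))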
object_detection/builders/image_resizer_builder_test.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.builders.image_resizer_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2


class ImageResizerBuilderTest(tf.test.TestCase):

  def _shape_of_resized_random_image_given_text_proto(
      self, input_shape, text_proto):
    image_resizer_config = image_resizer_pb2.ImageResizer()
    text_format.Merge(text_proto, image_resizer_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
    images = tf.to_float(tf.random_uniform(
        input_shape, minval=0, maxval=255, dtype=tf.int32))
    resized_images = image_resizer_fn(images)
    with self.test_session() as sess:
      return sess.run(resized_images).shape

  def test_built_keep_aspect_ratio_resizer_returns_expected_shape(self):
    image_resizer_text_proto = """
      keep_aspect_ratio_resizer {
        min_dimension: 10
        max_dimension: 20
      }
    """
    input_shape = (50, 25, 3)
    expected_output_shape = (20, 10, 3)
    output_shape = self._shape_of_resized_random_image_given_text_proto(
        input_shape, image_resizer_text_proto)
    self.assertEqual(output_shape, expected_output_shape)

  def test_built_fixed_shape_resizer_returns_expected_shape(self):
    image_resizer_text_proto = """
      fixed_shape_resizer {
        height: 10
        width: 20
      }
    """
    input_shape = (50, 25, 3)
    expected_output_shape = (10, 20, 3)
    output_shape = self._shape_of_resized_random_image_given_text_proto(
        input_shape, image_resizer_text_proto)
    self.assertEqual(output_shape, expected_output_shape)

  def test_raises_error_on_invalid_input(self):
    invalid_input = 'invalid_input'
    with self.assertRaises(ValueError):
      image_resizer_builder.build(invalid_input)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/input_reader_builder.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input reader builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wishes to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import
tensorflow
as
tf
from
object_detection.data_decoders
import
tf_example_decoder
from
object_detection.protos
import
input_reader_pb2
parallel_reader
=
tf
.
contrib
.
slim
.
parallel_reader
def
build
(
input_reader_config
):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
"""
if
not
isinstance
(
input_reader_config
,
input_reader_pb2
.
InputReader
):
raise
ValueError
(
'input_reader_config not of type '
'input_reader_pb2.InputReader.'
)
if
input_reader_config
.
WhichOneof
(
'input_reader'
)
==
'tf_record_input_reader'
:
config
=
input_reader_config
.
tf_record_input_reader
_
,
string_tensor
=
parallel_reader
.
parallel_read
(
config
.
input_path
,
reader_class
=
tf
.
TFRecordReader
,
num_epochs
=
(
input_reader_config
.
num_epochs
if
input_reader_config
.
num_epochs
else
None
),
num_readers
=
input_reader_config
.
num_readers
,
shuffle
=
input_reader_config
.
shuffle
,
dtypes
=
[
tf
.
string
,
tf
.
string
],
capacity
=
input_reader_config
.
queue_capacity
,
min_after_dequeue
=
input_reader_config
.
min_after_dequeue
)
return
tf_example_decoder
.
TfExampleDecoder
().
decode
(
string_tensor
)
raise
ValueError
(
'Unsupported input_reader_config.'
)
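Note that build only wires up the reader graph; actually pulling records requires started queue runners, as the test below demonstrates with tf.train.Supervisor. A minimal sketch, not part of this commit ('path/to/examples.record' is a placeholder for a real TFRecord file):

from google.protobuf import text_format

from object_detection.builders import input_reader_builder
from object_detection.protos import input_reader_pb2

input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge("""
  shuffle: false
  num_readers: 1
  tf_record_input_reader { input_path: 'path/to/examples.record' }
""", input_reader_proto)
# Keys follow object_detection.core.standard_fields, e.g. image and
# groundtruth_boxes; evaluating them needs a session with queue runners.
tensor_dict = input_reader_builder.build(input_reader_proto)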
object_detection/builders/input_reader_builder_test.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_reader_builder."""
import os
import numpy as np
import tensorflow as tf

from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from object_detection.builders import input_reader_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2


class InputReaderBuilderTest(tf.test.TestCase):

  def create_tf_record(self):
    path = os.path.join(self.get_temp_dir(), 'tfrecord')
    writer = tf.python_io.TFRecordWriter(path)

    image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    with self.test_session():
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
    example = example_pb2.Example(features=feature_pb2.Features(feature={
        'image/encoded': feature_pb2.Feature(
            bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])),
        'image/format': feature_pb2.Feature(
            bytes_list=feature_pb2.BytesList(value=['jpeg'.encode('utf-8')])),
        'image/object/bbox/xmin': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/xmax': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/bbox/ymin': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/ymax': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/class/label': feature_pb2.Feature(
            int64_list=feature_pb2.Int64List(value=[2])),
    }))
    writer.write(example.SerializeToString())
    writer.close()

    return path

  def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = input_reader_builder.build(input_reader_proto)

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertEquals(
        (4, 5, 3), output_dict[fields.InputDataFields.image].shape)
    self.assertEquals(
        [2], output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEquals(
        (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0])


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/losses_builder.py (new file, mode 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build localization and classification losses from config."""
from object_detection.core import losses
from object_detection.protos import losses_pb2


def build(loss_config):
  """Build losses based on the config.

  Builds classification and localization losses and, optionally, a hard
  example miner based on the config.

  Args:
    loss_config: A losses_pb2.Loss object.

  Returns:
    classification_loss: Classification loss object.
    localization_loss: Localization loss object.
    classification_weight: Classification loss weight.
    localization_weight: Localization loss weight.
    hard_example_miner: Hard example miner object.
  """
  classification_loss = _build_classification_loss(
      loss_config.classification_loss)
  localization_loss = _build_localization_loss(
      loss_config.localization_loss)
  classification_weight = loss_config.classification_weight
  localization_weight = loss_config.localization_weight
  hard_example_miner = None
  if loss_config.HasField('hard_example_miner'):
    hard_example_miner = build_hard_example_miner(
        loss_config.hard_example_miner,
        classification_weight,
        localization_weight)
  return (classification_loss, localization_loss,
          classification_weight, localization_weight, hard_example_miner)


def build_hard_example_miner(config,
                             classification_weight,
                             localization_weight):
  """Builds hard example miner based on the config.

  Args:
    config: A losses_pb2.HardExampleMiner object.
    classification_weight: Classification loss weight.
    localization_weight: Localization loss weight.

  Returns:
    Hard example miner.
  """
  loss_type = None
  if config.loss_type == losses_pb2.HardExampleMiner.BOTH:
    loss_type = 'both'
  if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION:
    loss_type = 'cls'
  if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION:
    loss_type = 'loc'

  max_negatives_per_positive = None
  num_hard_examples = None
  if config.max_negatives_per_positive > 0:
    max_negatives_per_positive = config.max_negatives_per_positive
  if config.num_hard_examples > 0:
    num_hard_examples = config.num_hard_examples
  hard_example_miner = losses.HardExampleMiner(
      num_hard_examples=num_hard_examples,
      iou_threshold=config.iou_threshold,
      loss_type=loss_type,
      cls_loss_weight=classification_weight,
      loc_loss_weight=localization_weight,
      max_negatives_per_positive=max_negatives_per_positive,
      min_negatives_per_image=config.min_negatives_per_image)
  return hard_example_miner


def _build_localization_loss(loss_config):
  """Builds a localization loss based on the loss config.

  Args:
    loss_config: A losses_pb2.LocalizationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.LocalizationLoss):
    raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')

  loss_type = loss_config.WhichOneof('localization_loss')

  if loss_type == 'weighted_l2':
    config = loss_config.weighted_l2
    return losses.WeightedL2LocalizationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'weighted_smooth_l1':
    config = loss_config.weighted_smooth_l1
    return losses.WeightedSmoothL1LocalizationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'weighted_iou':
    return losses.WeightedIOULocalizationLoss()

  raise ValueError('Empty loss config.')


def _build_classification_loss(loss_config):
  """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')

  loss_type = loss_config.WhichOneof('classification_loss')

  if loss_type == 'weighted_sigmoid':
    config = loss_config.weighted_sigmoid
    return losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'weighted_softmax':
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'bootstrapped_sigmoid':
    config = loss_config.bootstrapped_sigmoid
    return losses.BootstrappedSigmoidClassificationLoss(
        alpha=config.alpha,
        bootstrap_type=('hard' if config.hard_bootstrap else 'soft'),
        anchorwise_output=config.anchorwise_output)

  raise ValueError('Empty loss config.')
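A minimal usage sketch for losses_builder: the text-format config values below are illustrative only, and the returned loss objects are assumed callable over [batch, num_anchors, ...] tensors, as the tests below also exercise.

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import losses_builder
from object_detection.protos import losses_pb2

# Illustrative config; the weights are examples, not recommendations.
loss_proto = losses_pb2.Loss()
text_format.Merge("""
  classification_loss { weighted_sigmoid { } }
  localization_loss { weighted_smooth_l1 { } }
  classification_weight: 1.0
  localization_weight: 1.0
""", loss_proto)

(cls_loss, loc_loss, cls_weight, loc_weight,
 miner) = losses_builder.build(loss_proto)  # miner is None: no field set.

# The loss objects are callables over prediction/target tensors.
predictions = tf.zeros([1, 8, 4])  # [batch, num_anchors, box_code_size]
targets = tf.zeros([1, 8, 4])
weights = tf.ones([1, 8])          # per-anchor weights
total_loc_loss = loc_weight * loc_loss(predictions, targets, weights=weights)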
object_detection/builders/losses_builder_test.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import losses_builder
from object_detection.core import losses
from object_detection.protos import losses_pb2


class LocalizationLossBuilderTest(tf.test.TestCase):

  def test_build_weighted_l2_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedL2LocalizationLoss))

  def test_build_weighted_smooth_l1_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_smooth_l1 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedSmoothL1LocalizationLoss))

  def test_build_weighted_iou_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_iou {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedIOULocalizationLoss))

  def test_anchorwise_output(self):
    losses_text_proto = """
      localization_loss {
        weighted_smooth_l1 {
          anchorwise_output: true
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedSmoothL1LocalizationLoss))
    predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
    targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
    weights = tf.constant([[1.0, 1.0]])
    loss = localization_loss(predictions, targets, weights=weights)
    self.assertEqual(loss.shape, [1, 2])

  def test_raise_error_on_empty_localization_config(self):
    losses_text_proto = """
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    with self.assertRaises(ValueError):
      losses_builder._build_localization_loss(losses_proto)


class ClassificationLossBuilderTest(tf.test.TestCase):

  def test_build_weighted_sigmoid_classification_loss(self):
    losses_text_proto = """
      classification_loss {
        weighted_sigmoid {
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSigmoidClassificationLoss))

  def test_build_weighted_softmax_classification_loss(self):
    losses_text_proto = """
      classification_loss {
        weighted_softmax {
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSoftmaxClassificationLoss))

  def test_build_bootstrapped_sigmoid_classification_loss(self):
    losses_text_proto = """
      classification_loss {
        bootstrapped_sigmoid {
          alpha: 0.5
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.BootstrappedSigmoidClassificationLoss))

  def test_anchorwise_output(self):
    losses_text_proto = """
      classification_loss {
        weighted_sigmoid {
          anchorwise_output: true
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSigmoidClassificationLoss))
    predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])
    targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
    weights = tf.constant([[1.0, 1.0]])
    loss = classification_loss(predictions, targets, weights=weights)
    self.assertEqual(loss.shape, [1, 2])

  def test_raise_error_on_empty_config(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    with self.assertRaises(ValueError):
      losses_builder.build(losses_proto)


class HardExampleMinerBuilderTest(tf.test.TestCase):

  def test_do_not_build_hard_example_miner_by_default(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertEqual(hard_example_miner, None)

  def test_build_hard_example_miner_for_classification_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
        loss_type: CLASSIFICATION
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertEqual(hard_example_miner._loss_type, 'cls')

  def test_build_hard_example_miner_for_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
        loss_type: LOCALIZATION
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertEqual(hard_example_miner._loss_type, 'loc')

  def test_build_hard_example_miner_with_non_default_values(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
        num_hard_examples: 32
        iou_threshold: 0.5
        loss_type: LOCALIZATION
        max_negatives_per_positive: 10
        min_negatives_per_image: 3
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertEqual(hard_example_miner._num_hard_examples, 32)
    self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)
    self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)
    self.assertEqual(hard_example_miner._min_negatives_per_image, 3)


class LossBuilderTest(tf.test.TestCase):

  def test_build_all_loss_parameters(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
      }
      classification_weight: 0.8
      localization_weight: 0.2
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    (classification_loss, localization_loss,
     classification_weight, localization_weight,
     hard_example_miner) = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSoftmaxClassificationLoss))
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedL2LocalizationLoss))
    self.assertAlmostEqual(classification_weight, 0.8)
    self.assertAlmostEqual(localization_weight, 0.2)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/matcher_builder.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection matcher from configuration."""
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.protos import matcher_pb2


def build(matcher_config):
  """Builds a matcher object based on the matcher config.

  Args:
    matcher_config: A matcher.proto object containing the config for the
      desired Matcher.

  Returns:
    Matcher based on the config.

  Raises:
    ValueError: On empty matcher proto.
  """
  if not isinstance(matcher_config, matcher_pb2.Matcher):
    raise ValueError('matcher_config not of type matcher_pb2.Matcher.')
  if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher':
    matcher = matcher_config.argmax_matcher
    matched_threshold = unmatched_threshold = None
    if not matcher.ignore_thresholds:
      matched_threshold = matcher.matched_threshold
      unmatched_threshold = matcher.unmatched_threshold
    return argmax_matcher.ArgMaxMatcher(
        matched_threshold=matched_threshold,
        unmatched_threshold=unmatched_threshold,
        negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched,
        force_match_for_each_row=matcher.force_match_for_each_row)
  if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher':
    return bipartite_matcher.GreedyBipartiteMatcher()
  raise ValueError('Empty matcher.')
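A minimal usage sketch for matcher_builder; the thresholds below are illustrative, and the default-valued paths are exercised by the tests that follow.

from google.protobuf import text_format

from object_detection.builders import matcher_builder
from object_detection.protos import matcher_pb2

# Illustrative thresholds; omit them to fall back to the proto defaults.
matcher_proto = matcher_pb2.Matcher()
text_format.Merge("""
  argmax_matcher {
    matched_threshold: 0.7
    unmatched_threshold: 0.3
  }
""", matcher_proto)
matcher = matcher_builder.build(matcher_proto)  # an ArgMaxMatcher instance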
object_detection/builders/matcher_builder_test.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for matcher_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import matcher_builder
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.protos import matcher_pb2


class MatcherBuilderTest(tf.test.TestCase):

  def test_build_arg_max_matcher_with_defaults(self):
    matcher_text_proto = """
      argmax_matcher {
      }
    """
    matcher_proto = matcher_pb2.Matcher()
    text_format.Merge(matcher_text_proto, matcher_proto)
    matcher_object = matcher_builder.build(matcher_proto)
    self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher))
    self.assertAlmostEqual(matcher_object._matched_threshold, 0.5)
    self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5)
    self.assertTrue(matcher_object._negatives_lower_than_unmatched)
    self.assertFalse(matcher_object._force_match_for_each_row)

  def test_build_arg_max_matcher_without_thresholds(self):
    matcher_text_proto = """
      argmax_matcher {
        ignore_thresholds: true
      }
    """
    matcher_proto = matcher_pb2.Matcher()
    text_format.Merge(matcher_text_proto, matcher_proto)
    matcher_object = matcher_builder.build(matcher_proto)
    self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher))
    self.assertEqual(matcher_object._matched_threshold, None)
    self.assertEqual(matcher_object._unmatched_threshold, None)
    self.assertTrue(matcher_object._negatives_lower_than_unmatched)
    self.assertFalse(matcher_object._force_match_for_each_row)

  def test_build_arg_max_matcher_with_non_default_parameters(self):
    matcher_text_proto = """
      argmax_matcher {
        matched_threshold: 0.7
        unmatched_threshold: 0.3
        negatives_lower_than_unmatched: false
        force_match_for_each_row: true
      }
    """
    matcher_proto = matcher_pb2.Matcher()
    text_format.Merge(matcher_text_proto, matcher_proto)
    matcher_object = matcher_builder.build(matcher_proto)
    self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher))
    self.assertAlmostEqual(matcher_object._matched_threshold, 0.7)
    self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3)
    self.assertFalse(matcher_object._negatives_lower_than_unmatched)
    self.assertTrue(matcher_object._force_match_for_each_row)

  def test_build_bipartite_matcher(self):
    matcher_text_proto = """
      bipartite_matcher {
      }
    """
    matcher_proto = matcher_pb2.Matcher()
    text_format.Merge(matcher_text_proto, matcher_proto)
    matcher_object = matcher_builder.build(matcher_proto)
    self.assertTrue(
        isinstance(matcher_object, bipartite_matcher.GreedyBipartiteMatcher))

  def test_raise_error_on_empty_matcher(self):
    matcher_text_proto = """
    """
    matcher_proto = matcher_pb2.Matcher()
    text_format.Merge(matcher_text_proto, matcher_proto)
    with self.assertRaises(ValueError):
      matcher_builder.build(matcher_proto)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/model_builder.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build a DetectionModel from configuration."""
from object_detection.builders import anchor_generator_builder
from object_detection.builders import box_coder_builder
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import losses_builder
from object_detection.builders import matcher_builder
from object_detection.builders import post_processing_builder
from object_detection.builders import region_similarity_calculator_builder as sim_calc
from object_detection.core import box_predictor
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1
from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.protos import model_pb2

# A map of names to SSD feature extractors.
SSD_FEATURE_EXTRACTOR_CLASS_MAP = {
    'ssd_inception_v2': SSDInceptionV2FeatureExtractor,
    'ssd_mobilenet_v1': SSDMobileNetV1FeatureExtractor,
}

# A map of names to Faster R-CNN feature extractors.
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = {
    'faster_rcnn_resnet50': frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
    'faster_rcnn_resnet101':
        frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
    'faster_rcnn_resnet152':
        frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor,
    'faster_rcnn_inception_resnet_v2':
        frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor
}


def build(model_config, is_training):
  """Builds a DetectionModel based on the model config.

  Args:
    model_config: A model.proto object containing the config for the desired
      DetectionModel.
    is_training: True if this model is being built for training purposes.

  Returns:
    DetectionModel based on the config.

  Raises:
    ValueError: On invalid meta architecture or model.
  """
  if not isinstance(model_config, model_pb2.DetectionModel):
    raise ValueError('model_config not of type model_pb2.DetectionModel.')
  meta_architecture = model_config.WhichOneof('model')
  if meta_architecture == 'ssd':
    return _build_ssd_model(model_config.ssd, is_training)
  if meta_architecture == 'faster_rcnn':
    return _build_faster_rcnn_model(model_config.faster_rcnn, is_training)
  raise ValueError('Unknown meta architecture: {}'.format(meta_architecture))


def _build_ssd_feature_extractor(feature_extractor_config, is_training,
                                 reuse_weights=None):
  """Builds a ssd_meta_arch.SSDFeatureExtractor based on config.

  Args:
    feature_extractor_config: A SSDFeatureExtractor proto config from
      ssd.proto.
    is_training: True if this feature extractor is being built for training.
    reuse_weights: if the feature extractor should reuse weights.

  Returns:
    ssd_meta_arch.SSDFeatureExtractor based on config.

  Raises:
    ValueError: On invalid feature extractor type.
  """
  feature_type = feature_extractor_config.type
  depth_multiplier = feature_extractor_config.depth_multiplier
  min_depth = feature_extractor_config.min_depth
  conv_hyperparams = hyperparams_builder.build(
      feature_extractor_config.conv_hyperparams, is_training)

  if feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP:
    raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type))

  feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type]
  return feature_extractor_class(depth_multiplier, min_depth,
                                 conv_hyperparams, reuse_weights)


def _build_ssd_model(ssd_config, is_training):
  """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.

  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = ssd_config.num_classes

  # Feature extractor
  feature_extractor = _build_ssd_feature_extractor(
      ssd_config.feature_extractor, is_training)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)
  ssd_box_predictor = box_predictor_builder.build(
      hyperparams_builder.build, ssd_config.box_predictor, is_training,
      num_classes)
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight, hard_example_miner) = losses_builder.build(
       ssd_config.loss)
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches

  return ssd_meta_arch.SSDMetaArch(
      is_training,
      anchor_generator,
      ssd_box_predictor,
      box_coder,
      feature_extractor,
      matcher,
      region_similarity_calculator,
      image_resizer_fn,
      non_max_suppression_fn,
      score_conversion_fn,
      classification_loss,
      localization_loss,
      classification_weight,
      localization_weight,
      normalize_loss_by_num_matches,
      hard_example_miner)


def _build_faster_rcnn_feature_extractor(
    feature_extractor_config, is_training, reuse_weights=None):
  """Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.

  Args:
    feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
      faster_rcnn.proto.
    is_training: True if this feature extractor is being built for training.
    reuse_weights: if the feature extractor should reuse weights.

  Returns:
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.

  Raises:
    ValueError: On invalid feature extractor type.
  """
  feature_type = feature_extractor_config.type
  first_stage_features_stride = (
      feature_extractor_config.first_stage_features_stride)

  if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP:
    raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
        feature_type))
  feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[
      feature_type]
  return feature_extractor_class(
      is_training, first_stage_features_stride, reuse_weights)


def _build_faster_rcnn_model(frcnn_config, is_training):
  """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of
  type `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.

  Returns:
    FasterRCNNMetaArch based on the config.

  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = frcnn_config.num_classes
  image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

  feature_extractor = _build_faster_rcnn_feature_extractor(
      frcnn_config.feature_extractor, is_training)

  first_stage_only = frcnn_config.first_stage_only
  first_stage_anchor_generator = anchor_generator_builder.build(
      frcnn_config.first_stage_anchor_generator)

  first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
  first_stage_box_predictor_arg_scope = hyperparams_builder.build(
      frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
  first_stage_box_predictor_kernel_size = (
      frcnn_config.first_stage_box_predictor_kernel_size)
  first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
  first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
  first_stage_positive_balance_fraction = (
      frcnn_config.first_stage_positive_balance_fraction)
  first_stage_nms_score_threshold = frcnn_config.first_stage_nms_score_threshold
  first_stage_nms_iou_threshold = frcnn_config.first_stage_nms_iou_threshold
  first_stage_max_proposals = frcnn_config.first_stage_max_proposals
  first_stage_loc_loss_weight = (
      frcnn_config.first_stage_localization_loss_weight)
  first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

  initial_crop_size = frcnn_config.initial_crop_size
  maxpool_kernel_size = frcnn_config.maxpool_kernel_size
  maxpool_stride = frcnn_config.maxpool_stride

  second_stage_box_predictor = box_predictor_builder.build(
      hyperparams_builder.build,
      frcnn_config.second_stage_box_predictor,
      is_training=is_training,
      num_classes=num_classes)
  second_stage_batch_size = frcnn_config.second_stage_batch_size
  second_stage_balance_fraction = frcnn_config.second_stage_balance_fraction
  (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
  ) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
  second_stage_localization_loss_weight = (
      frcnn_config.second_stage_localization_loss_weight)
  second_stage_classification_loss_weight = (
      frcnn_config.second_stage_classification_loss_weight)

  hard_example_miner = None
  if frcnn_config.HasField('hard_example_miner'):
    hard_example_miner = losses_builder.build_hard_example_miner(
        frcnn_config.hard_example_miner,
        second_stage_classification_loss_weight,
        second_stage_localization_loss_weight)

  common_kwargs = {
      'is_training': is_training,
      'num_classes': num_classes,
      'image_resizer_fn': image_resizer_fn,
      'feature_extractor': feature_extractor,
      'first_stage_only': first_stage_only,
      'first_stage_anchor_generator': first_stage_anchor_generator,
      'first_stage_atrous_rate': first_stage_atrous_rate,
      'first_stage_box_predictor_arg_scope':
          first_stage_box_predictor_arg_scope,
      'first_stage_box_predictor_kernel_size':
          first_stage_box_predictor_kernel_size,
      'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
      'first_stage_minibatch_size': first_stage_minibatch_size,
      'first_stage_positive_balance_fraction':
          first_stage_positive_balance_fraction,
      'first_stage_nms_score_threshold': first_stage_nms_score_threshold,
      'first_stage_nms_iou_threshold': first_stage_nms_iou_threshold,
      'first_stage_max_proposals': first_stage_max_proposals,
      'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
      'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
      'second_stage_batch_size': second_stage_batch_size,
      'second_stage_balance_fraction': second_stage_balance_fraction,
      'second_stage_non_max_suppression_fn':
          second_stage_non_max_suppression_fn,
      'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
      'second_stage_localization_loss_weight':
          second_stage_localization_loss_weight,
      'second_stage_classification_loss_weight':
          second_stage_classification_loss_weight,
      'hard_example_miner': hard_example_miner}

  if isinstance(second_stage_box_predictor, box_predictor.RfcnBoxPredictor):
    return rfcn_meta_arch.RFCNMetaArch(
        second_stage_rfcn_box_predictor=second_stage_box_predictor,
        **common_kwargs)
  else:
    return faster_rcnn_meta_arch.FasterRCNNMetaArch(
        initial_crop_size=initial_crop_size,
        maxpool_kernel_size=maxpool_kernel_size,
        maxpool_stride=maxpool_stride,
        second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
        **common_kwargs)
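A minimal usage sketch for model_builder, assuming 'model.config' is a placeholder path to a text-format DetectionModel proto like the ones embedded in the tests below; the preprocess/predict/postprocess calls follow the DetectionModel interface from object_detection/core/model.py.

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import model_builder
from object_detection.protos import model_pb2

# 'model.config' is a hypothetical path to a text-format DetectionModel proto.
model_proto = model_pb2.DetectionModel()
with tf.gfile.GFile('model.config', 'r') as f:
  text_format.Merge(f.read(), model_proto)

detection_model = model_builder.build(model_proto, is_training=False)

# A DetectionModel exposes preprocess / predict / postprocess:
images = tf.placeholder(tf.float32, shape=[1, None, None, 3])
preprocessed = detection_model.preprocess(images)
prediction_dict = detection_model.predict(preprocessed)
detections = detection_model.postprocess(prediction_dict)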
object_detection/builders/model_builder_test.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.models.model_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import model_builder
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1
from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.protos import model_pb2

FEATURE_EXTRACTOR_MAPS = {
    'faster_rcnn_resnet50': frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
    'faster_rcnn_resnet101':
        frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
    'faster_rcnn_resnet152':
        frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor
}


class ModelBuilderTest(tf.test.TestCase):

  def create_model(self, model_config):
    """Builds a DetectionModel based on the model config.

    Args:
      model_config: A model.proto object containing the config for the
        desired DetectionModel.

    Returns:
      DetectionModel based on the config.
    """
    return model_builder.build(model_config, is_training=True)

  def test_create_ssd_inception_v2_model_from_config(self):
    model_text_proto = """
      ssd {
        feature_extractor {
          type: 'ssd_inception_v2'
          conv_hyperparams {
            regularizer {
              l2_regularizer {
              }
            }
            initializer {
              truncated_normal_initializer {
              }
            }
          }
        }
        box_coder {
          faster_rcnn_box_coder {
          }
        }
        matcher {
          argmax_matcher {
          }
        }
        similarity_calculator {
          iou_similarity {
          }
        }
        anchor_generator {
          ssd_anchor_generator {
            aspect_ratios: 1.0
          }
        }
        image_resizer {
          fixed_shape_resizer {
            height: 320
            width: 320
          }
        }
        box_predictor {
          convolutional_box_predictor {
            conv_hyperparams {
              regularizer {
                l2_regularizer {
                }
              }
              initializer {
                truncated_normal_initializer {
                }
              }
            }
          }
        }
        loss {
          classification_loss {
            weighted_softmax {
            }
          }
          localization_loss {
            weighted_smooth_l1 {
            }
          }
        }
      }"""
    model_proto = model_pb2.DetectionModel()
    text_format.Merge(model_text_proto, model_proto)
    model = self.create_model(model_proto)
    self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
    self.assertIsInstance(model._feature_extractor,
                          SSDInceptionV2FeatureExtractor)

  def test_create_ssd_mobilenet_v1_model_from_config(self):
    model_text_proto = """
      ssd {
        feature_extractor {
          type: 'ssd_mobilenet_v1'
          conv_hyperparams {
            regularizer {
              l2_regularizer {
              }
            }
            initializer {
              truncated_normal_initializer {
              }
            }
          }
        }
        box_coder {
          faster_rcnn_box_coder {
          }
        }
        matcher {
          argmax_matcher {
          }
        }
        similarity_calculator {
          iou_similarity {
          }
        }
        anchor_generator {
          ssd_anchor_generator {
            aspect_ratios: 1.0
          }
        }
        image_resizer {
          fixed_shape_resizer {
            height: 320
            width: 320
          }
        }
        box_predictor {
          convolutional_box_predictor {
            conv_hyperparams {
              regularizer {
                l2_regularizer {
                }
              }
              initializer {
                truncated_normal_initializer {
                }
              }
            }
          }
        }
        loss {
          classification_loss {
            weighted_softmax {
            }
          }
          localization_loss {
            weighted_smooth_l1 {
            }
          }
        }
      }"""
    model_proto = model_pb2.DetectionModel()
    text_format.Merge(model_text_proto, model_proto)
    model = self.create_model(model_proto)
    self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
    self.assertIsInstance(model._feature_extractor,
                          SSDMobileNetV1FeatureExtractor)

  def test_create_faster_rcnn_resnet_v1_models_from_config(self):
    model_text_proto = """
      faster_rcnn {
        num_classes: 3
        image_resizer {
          keep_aspect_ratio_resizer {
            min_dimension: 600
            max_dimension: 1024
          }
        }
        feature_extractor {
          type: 'faster_rcnn_resnet101'
        }
        first_stage_anchor_generator {
          grid_anchor_generator {
            scales: [0.25, 0.5, 1.0, 2.0]
            aspect_ratios: [0.5, 1.0, 2.0]
            height_stride: 16
            width_stride: 16
          }
        }
        first_stage_box_predictor_conv_hyperparams {
          regularizer {
            l2_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
        initial_crop_size: 14
        maxpool_kernel_size: 2
        maxpool_stride: 2
        second_stage_box_predictor {
          mask_rcnn_box_predictor {
            fc_hyperparams {
              op: FC
              regularizer {
                l2_regularizer {
                }
              }
              initializer {
                truncated_normal_initializer {
                }
              }
            }
          }
        }
        second_stage_post_processing {
          batch_non_max_suppression {
            score_threshold: 0.01
            iou_threshold: 0.6
            max_detections_per_class: 100
            max_total_detections: 300
          }
          score_converter: SOFTMAX
        }
      }"""
    model_proto = model_pb2.DetectionModel()
    text_format.Merge(model_text_proto, model_proto)
    for extractor_type, extractor_class in FEATURE_EXTRACTOR_MAPS.items():
      model_proto.faster_rcnn.feature_extractor.type = extractor_type
      model = model_builder.build(model_proto, is_training=True)
      self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
      self.assertIsInstance(model._feature_extractor, extractor_class)

  def test_create_faster_rcnn_inception_resnet_v2_model_from_config(self):
    model_text_proto = """
      faster_rcnn {
        num_classes: 3
        image_resizer {
          keep_aspect_ratio_resizer {
            min_dimension: 600
            max_dimension: 1024
          }
        }
        feature_extractor {
          type: 'faster_rcnn_inception_resnet_v2'
        }
        first_stage_anchor_generator {
          grid_anchor_generator {
            scales: [0.25, 0.5, 1.0, 2.0]
            aspect_ratios: [0.5, 1.0, 2.0]
            height_stride: 16
            width_stride: 16
          }
        }
        first_stage_box_predictor_conv_hyperparams {
          regularizer {
            l2_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
        initial_crop_size: 17
        maxpool_kernel_size: 1
        maxpool_stride: 1
        second_stage_box_predictor {
          mask_rcnn_box_predictor {
            fc_hyperparams {
              op: FC
              regularizer {
                l2_regularizer {
                }
              }
              initializer {
                truncated_normal_initializer {
                }
              }
            }
          }
        }
        second_stage_post_processing {
          batch_non_max_suppression {
            score_threshold: 0.01
            iou_threshold: 0.6
            max_detections_per_class: 100
            max_total_detections: 300
          }
          score_converter: SOFTMAX
        }
      }"""
    model_proto = model_pb2.DetectionModel()
    text_format.Merge(model_text_proto, model_proto)
    model = model_builder.build(model_proto, is_training=True)
    self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
    self.assertIsInstance(
        model._feature_extractor,
        frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor)

  def test_create_faster_rcnn_model_from_config_with_example_miner(self):
    model_text_proto = """
      faster_rcnn {
        num_classes: 3
        feature_extractor {
          type: 'faster_rcnn_inception_resnet_v2'
        }
        image_resizer {
          keep_aspect_ratio_resizer {
            min_dimension: 600
            max_dimension: 1024
          }
        }
        first_stage_anchor_generator {
          grid_anchor_generator {
            scales: [0.25, 0.5, 1.0, 2.0]
            aspect_ratios: [0.5, 1.0, 2.0]
            height_stride: 16
            width_stride: 16
          }
        }
        first_stage_box_predictor_conv_hyperparams {
          regularizer {
            l2_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
        second_stage_box_predictor {
          mask_rcnn_box_predictor {
            fc_hyperparams {
              op: FC
              regularizer {
                l2_regularizer {
                }
              }
              initializer {
                truncated_normal_initializer {
                }
              }
            }
          }
        }
        hard_example_miner {
          num_hard_examples: 10
          iou_threshold: 0.99
        }
      }"""
    model_proto = model_pb2.DetectionModel()
    text_format.Merge(model_text_proto, model_proto)
    model = model_builder.build(model_proto, is_training=True)
    self.assertIsNotNone(model._hard_example_miner)

  def test_create_rfcn_resnet_v1_model_from_config(self):
    model_text_proto = """
      faster_rcnn {
        num_classes: 3
        image_resizer {
          keep_aspect_ratio_resizer {
            min_dimension: 600
            max_dimension: 1024
          }
        }
        feature_extractor {
          type: 'faster_rcnn_resnet101'
        }
        first_stage_anchor_generator {
          grid_anchor_generator {
            scales: [0.25, 0.5, 1.0, 2.0]
            aspect_ratios: [0.5, 1.0, 2.0]
            height_stride: 16
            width_stride: 16
          }
        }
        first_stage_box_predictor_conv_hyperparams {
          regularizer {
            l2_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
        initial_crop_size: 14
        maxpool_kernel_size: 2
        maxpool_stride: 2
        second_stage_box_predictor {
          rfcn_box_predictor {
            conv_hyperparams {
              op: CONV
              regularizer {
                l2_regularizer {
                }
              }
              initializer {
                truncated_normal_initializer {
                }
              }
            }
          }
        }
        second_stage_post_processing {
          batch_non_max_suppression {
            score_threshold: 0.01
            iou_threshold: 0.6
            max_detections_per_class: 100
            max_total_detections: 300
          }
          score_converter: SOFTMAX
        }
      }"""
    model_proto = model_pb2.DetectionModel()
    text_format.Merge(model_text_proto, model_proto)
    for extractor_type, extractor_class in FEATURE_EXTRACTOR_MAPS.items():
      model_proto.faster_rcnn.feature_extractor.type = extractor_type
      model = model_builder.build(model_proto, is_training=True)
      self.assertIsInstance(model, rfcn_meta_arch.RFCNMetaArch)
      self.assertIsInstance(model._feature_extractor, extractor_class)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/optimizer_builder.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
import tensorflow as tf

from object_detection.utils import learning_schedules

slim = tf.contrib.slim


def build(optimizer_config, global_summaries):
  """Create optimizer based on config.

  Args:
    optimizer_config: An Optimizer proto message.
    global_summaries: A set to attach learning rate summary to.

  Returns:
    An optimizer.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  optimizer_type = optimizer_config.WhichOneof('optimizer')
  optimizer = None

  if optimizer_type == 'rms_prop_optimizer':
    config = optimizer_config.rms_prop_optimizer
    optimizer = tf.train.RMSPropOptimizer(
        _create_learning_rate(config.learning_rate, global_summaries),
        decay=config.decay,
        momentum=config.momentum_optimizer_value,
        epsilon=config.epsilon)

  if optimizer_type == 'momentum_optimizer':
    config = optimizer_config.momentum_optimizer
    optimizer = tf.train.MomentumOptimizer(
        _create_learning_rate(config.learning_rate, global_summaries),
        momentum=config.momentum_optimizer_value)

  if optimizer_type == 'adam_optimizer':
    config = optimizer_config.adam_optimizer
    optimizer = tf.train.AdamOptimizer(
        _create_learning_rate(config.learning_rate, global_summaries))

  if optimizer is None:
    raise ValueError('Optimizer %s not supported.' % optimizer_type)

  if optimizer_config.use_moving_average:
    optimizer = tf.contrib.opt.MovingAverageOptimizer(
        optimizer, average_decay=optimizer_config.moving_average_decay)

  return optimizer


def _create_learning_rate(learning_rate_config, global_summaries):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.
    global_summaries: A set to attach learning rate summary to.

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  learning_rate = None
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')

  if learning_rate_type == 'constant_learning_rate':
    config = learning_rate_config.constant_learning_rate
    learning_rate = config.learning_rate

  if learning_rate_type == 'exponential_decay_learning_rate':
    config = learning_rate_config.exponential_decay_learning_rate
    learning_rate = tf.train.exponential_decay(
        config.initial_learning_rate,
        slim.get_or_create_global_step(),
        config.decay_steps,
        config.decay_factor,
        staircase=config.staircase)

  if learning_rate_type == 'manual_step_learning_rate':
    config = learning_rate_config.manual_step_learning_rate
    if not config.schedule:
      raise ValueError('Empty learning rate schedule.')
    learning_rate_step_boundaries = [x.step for x in config.schedule]
    learning_rate_sequence = [config.initial_learning_rate]
    learning_rate_sequence += [x.learning_rate for x in config.schedule]
    learning_rate = learning_schedules.manual_stepping(
        slim.get_or_create_global_step(), learning_rate_step_boundaries,
        learning_rate_sequence)

  if learning_rate is None:
    raise ValueError('Learning_rate %s not supported.' % learning_rate_type)

  global_summaries.add(tf.summary.scalar('Learning Rate', learning_rate))
  return learning_rate
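A minimal usage sketch for optimizer_builder; the learning rate and momentum values below are illustrative only.

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2

# Illustrative momentum optimizer config; the values are examples only.
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge("""
  momentum_optimizer {
    learning_rate {
      constant_learning_rate { learning_rate: 0.001 }
    }
    momentum_optimizer_value: 0.9
  }
  use_moving_average: false
""", optimizer_proto)

global_summaries = set()  # receives the learning-rate summary op
optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
# `optimizer` is a regular tf.train.Optimizer; it would typically be used as
# train_op = optimizer.minimize(total_loss).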
object_detection/builders/optimizer_builder_test.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2


class LearningRateBuilderTest(tf.test.TestCase):

  def testBuildConstantLearningRate(self):
    learning_rate_text_proto = """
      constant_learning_rate {
        learning_rate: 0.004
      }
    """
    global_summaries = set([])
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto, global_summaries)
    self.assertAlmostEqual(learning_rate, 0.004)

  def testBuildExponentialDecayLearningRate(self):
    learning_rate_text_proto = """
      exponential_decay_learning_rate {
        initial_learning_rate: 0.004
        decay_steps: 99999
        decay_factor: 0.85
        staircase: false
      }
    """
    global_summaries = set([])
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto, global_summaries)
    self.assertTrue(isinstance(learning_rate, tf.Tensor))

  def testBuildManualStepLearningRate(self):
    learning_rate_text_proto = """
      manual_step_learning_rate {
        schedule {
          step: 0
          learning_rate: 0.006
        }
        schedule {
          step: 90000
          learning_rate: 0.00006
        }
      }
    """
    global_summaries = set([])
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto, global_summaries)
    self.assertTrue(isinstance(learning_rate, tf.Tensor))

  def testRaiseErrorOnEmptyLearningRate(self):
    learning_rate_text_proto = """
    """
    global_summaries = set([])
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    with self.assertRaises(ValueError):
      optimizer_builder._create_learning_rate(
          learning_rate_proto, global_summaries)


class OptimizerBuilderTest(tf.test.TestCase):

  def testBuildRMSPropOptimizer(self):
    optimizer_text_proto = """
      rms_prop_optimizer: {
        learning_rate: {
          exponential_decay_learning_rate {
            initial_learning_rate: 0.004
            decay_steps: 800720
            decay_factor: 0.95
          }
        }
        momentum_optimizer_value: 0.9
        decay: 0.9
        epsilon: 1.0
      }
      use_moving_average: false
    """
    global_summaries = set([])
    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge(optimizer_text_proto, optimizer_proto)
    optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
    self.assertTrue(isinstance(optimizer, tf.train.RMSPropOptimizer))

  def testBuildMomentumOptimizer(self):
    optimizer_text_proto = """
      momentum_optimizer: {
        learning_rate: {
          constant_learning_rate {
            learning_rate: 0.001
          }
        }
        momentum_optimizer_value: 0.99
      }
      use_moving_average: false
    """
    global_summaries = set([])
    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge(optimizer_text_proto, optimizer_proto)
    optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
    self.assertTrue(isinstance(optimizer, tf.train.MomentumOptimizer))

  def testBuildAdamOptimizer(self):
    optimizer_text_proto = """
      adam_optimizer: {
        learning_rate: {
          constant_learning_rate {
            learning_rate: 0.002
          }
        }
      }
      use_moving_average: false
    """
    global_summaries = set([])
    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge(optimizer_text_proto, optimizer_proto)
    optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
    self.assertTrue(isinstance(optimizer, tf.train.AdamOptimizer))

  def testBuildMovingAverageOptimizer(self):
    optimizer_text_proto = """
      adam_optimizer: {
        learning_rate: {
          constant_learning_rate {
            learning_rate: 0.002
          }
        }
      }
      use_moving_average: True
    """
    global_summaries = set([])
    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge(optimizer_text_proto, optimizer_proto)
    optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
    self.assertTrue(
        isinstance(optimizer, tf.contrib.opt.MovingAverageOptimizer))

  def testBuildMovingAverageOptimizerWithNonDefaultDecay(self):
    optimizer_text_proto = """
      adam_optimizer: {
        learning_rate: {
          constant_learning_rate {
            learning_rate: 0.002
          }
        }
      }
      use_moving_average: True
      moving_average_decay: 0.2
    """
    global_summaries = set([])
    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge(optimizer_text_proto, optimizer_proto)
    optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
    self.assertTrue(
        isinstance(optimizer, tf.contrib.opt.MovingAverageOptimizer))
    # TODO: Find a way to not depend on the private members.
    self.assertAlmostEqual(optimizer._ema._decay, 0.2)

  def testBuildEmptyOptimizer(self):
    optimizer_text_proto = """
    """
    global_summaries = set([])
    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge(optimizer_text_proto, optimizer_proto)
    with self.assertRaises(ValueError):
      optimizer_builder.build(optimizer_proto, global_summaries)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/post_processing_builder.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for post processing operations."""
import functools

import tensorflow as tf

from object_detection.core import post_processing
from object_detection.protos import post_processing_pb2


def build(post_processing_config):
  """Builds callables for post-processing operations.

  Builds callables for non-max suppression and score conversion based on the
  configuration.

  Non-max suppression callable takes `boxes`, `scores`, and optionally
  `clip_window`, `parallel_iterations` and `scope` as inputs. It returns
  `nms_boxes`, `nms_scores`, `nms_classes` and `num_detections`. See
  post_processing.batch_multiclass_non_max_suppression for the type and shape
  of these tensors.

  Score converter callable should be called with `input` tensor. The callable
  returns the output from one of 3 tf operations based on the configuration -
  tf.identity, tf.sigmoid or tf.nn.softmax. See tensorflow documentation for
  argument and return value descriptions.

  Args:
    post_processing_config: post_processing.proto object containing the
      parameters for the post-processing operations.

  Returns:
    non_max_suppressor_fn: Callable for non-max suppression.
    score_converter_fn: Callable for score conversion.

  Raises:
    ValueError: if the post_processing_config is of incorrect type.
  """
  if not isinstance(post_processing_config,
                    post_processing_pb2.PostProcessing):
    raise ValueError('post_processing_config not of type '
                     'post_processing_pb2.PostProcessing.')
  non_max_suppressor_fn = _build_non_max_suppressor(
      post_processing_config.batch_non_max_suppression)
  score_converter_fn = _build_score_converter(
      post_processing_config.score_converter)
  return non_max_suppressor_fn, score_converter_fn


def _build_non_max_suppressor(nms_config):
  """Builds non-max suppression based on the nms config.

  Args:
    nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression
      proto.

  Returns:
    non_max_suppressor_fn: Callable non-max suppressor.

  Raises:
    ValueError: On incorrect iou_threshold or on incompatible values of
      max_total_detections and max_detections_per_class.
  """
  if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0:
    raise ValueError('iou_threshold not in [0, 1.0].')
  if nms_config.max_detections_per_class > nms_config.max_total_detections:
    raise ValueError('max_detections_per_class should be no greater than '
                     'max_total_detections.')

  non_max_suppressor_fn = functools.partial(
      post_processing.batch_multiclass_non_max_suppression,
      score_thresh=nms_config.score_threshold,
      iou_thresh=nms_config.iou_threshold,
      max_size_per_class=nms_config.max_detections_per_class,
      max_total_size=nms_config.max_total_detections)
  return non_max_suppressor_fn


def _build_score_converter(score_converter_config):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.nn.softmax] score converters
  based on the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return tf.identity
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return tf.sigmoid
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return tf.nn.softmax
  raise ValueError('Unknown score converter.')
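A minimal usage sketch for post_processing_builder; the NMS parameter values below are illustrative, not the proto defaults.

from google.protobuf import text_format

from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2

# Illustrative NMS parameters.
post_processing_proto = post_processing_pb2.PostProcessing()
text_format.Merge("""
  batch_non_max_suppression {
    score_threshold: 0.3
    iou_threshold: 0.6
    max_detections_per_class: 100
    max_total_detections: 300
  }
  score_converter: SIGMOID
""", post_processing_proto)

nms_fn, score_converter_fn = post_processing_builder.build(
    post_processing_proto)
# score_converter_fn is tf.sigmoid here; nms_fn is a functools.partial
# around post_processing.batch_multiclass_non_max_suppression.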
object_detection/builders/post_processing_builder_test.py
0 → 100644
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for post_processing_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2


class PostProcessingBuilderTest(tf.test.TestCase):

  def test_build_non_max_suppressor_with_correct_parameters(self):
    post_processing_text_proto = """
      batch_non_max_suppression {
        score_threshold: 0.7
        iou_threshold: 0.6
        max_detections_per_class: 100
        max_total_detections: 300
      }
    """
    post_processing_config = post_processing_pb2.PostProcessing()
    text_format.Merge(post_processing_text_proto, post_processing_config)
    non_max_suppressor, _ = post_processing_builder.build(
        post_processing_config)
    self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100)
    self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300)
    self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7)
    self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6)

  def test_build_identity_score_converter(self):
    post_processing_text_proto = """
      score_converter: IDENTITY
    """
    post_processing_config = post_processing_pb2.PostProcessing()
    text_format.Merge(post_processing_text_proto, post_processing_config)
    _, score_converter = post_processing_builder.build(post_processing_config)
    self.assertEqual(score_converter, tf.identity)

  def test_build_sigmoid_score_converter(self):
    post_processing_text_proto = """
      score_converter: SIGMOID
    """
    post_processing_config = post_processing_pb2.PostProcessing()
    text_format.Merge(post_processing_text_proto, post_processing_config)
    _, score_converter = post_processing_builder.build(post_processing_config)
    self.assertEqual(score_converter, tf.sigmoid)

  def test_build_softmax_score_converter(self):
    post_processing_text_proto = """
      score_converter: SOFTMAX
    """
    post_processing_config = post_processing_pb2.PostProcessing()
    text_format.Merge(post_processing_text_proto, post_processing_config)
    _, score_converter = post_processing_builder.build(post_processing_config)
    self.assertEqual(score_converter, tf.nn.softmax)


if __name__ == '__main__':
  tf.test.main()
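For intuition, the three score converters differ only in how raw detection
scores are normalized. A minimal sketch in the TF 1.x style used throughout
this repository (the logit values are illustrative):

import tensorflow as tf

logits = tf.constant([2.0, 0.0, -2.0])
with tf.Session() as sess:
  print(sess.run(tf.identity(logits)))    # [ 2.  0. -2.]  (scores unchanged)
  print(sess.run(tf.sigmoid(logits)))     # ~[0.88 0.50 0.12] (independent per class)
  print(sess.run(tf.nn.softmax(logits)))  # ~[0.87 0.12 0.02] (sums to 1 across classes)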
object_detection/builders/preprocessor_builder.py
0 → 100644
View file @
44fa1d37
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for preprocessing steps."""
import tensorflow as tf

from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2

def _get_step_config_from_proto(preprocessor_step_config, step_name):
  """Returns the value of a field named step_name from proto.

  Args:
    preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object.
    step_name: Name of the field to get value from.

  Returns:
    result_dict: a sub proto message from preprocessor_step_config which will
      be later converted to a dictionary.

  Raises:
    ValueError: If field does not exist in proto.
  """
  for field, value in preprocessor_step_config.ListFields():
    if field.name == step_name:
      return value

  raise ValueError('Could not get field %s from proto!' % step_name)

def _get_dict_from_proto(config):
  """Helper function to put all proto fields into a dictionary.

  For many preprocessing steps, there's a trivial 1-1 mapping from proto fields
  to function arguments. This function automatically populates a dictionary
  with the arguments from the proto.

  Protos that CANNOT be trivially populated include:
  * nested messages.
  * steps that check if an optional field is set (i.e. where None != 0).
  * protos that don't map 1-1 to arguments (i.e. a list should be reshaped).
  * fields requiring additional validation (i.e. repeated field has n elements).

  Args:
    config: A protobuf object that does not violate the conditions above.

  Returns:
    result_dict: |config| converted into a python dictionary.
  """
  result_dict = {}
  for field, value in config.ListFields():
    result_dict[field.name] = value
  return result_dict
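# Illustrative example (field values hypothetical): given a step proto such as
#   random_pixel_value_scale { minval: 0.9 maxval: 1.1 }
# the sub-message extracted by _get_step_config_from_proto is converted here to
# {'minval': 0.9, 'maxval': 1.1}, which build() then passes as keyword
# arguments to preprocessor.random_pixel_value_scale.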

# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
PREPROCESSING_FUNCTION_MAP = {
    'normalize_image': preprocessor.normalize_image,
    'random_horizontal_flip': preprocessor.random_horizontal_flip,
    'random_pixel_value_scale': preprocessor.random_pixel_value_scale,
    'random_image_scale': preprocessor.random_image_scale,
    'random_rgb_to_gray': preprocessor.random_rgb_to_gray,
    'random_adjust_brightness': preprocessor.random_adjust_brightness,
    'random_adjust_contrast': preprocessor.random_adjust_contrast,
    'random_adjust_hue': preprocessor.random_adjust_hue,
    'random_adjust_saturation': preprocessor.random_adjust_saturation,
    'random_distort_color': preprocessor.random_distort_color,
    'random_jitter_boxes': preprocessor.random_jitter_boxes,
    'random_crop_to_aspect_ratio': preprocessor.random_crop_to_aspect_ratio,
    'random_black_patches': preprocessor.random_black_patches,
    'scale_boxes_to_pixel_coordinates': (
        preprocessor.scale_boxes_to_pixel_coordinates),
    'subtract_channel_mean': preprocessor.subtract_channel_mean,
}

# A map to convert from preprocessor_pb2.ResizeImage.Method enum to
# tf.image.ResizeMethod.
RESIZE_METHOD_MAP = {
    preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA,
    preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC,
    preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR,
    preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: (
        tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}
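# For example, a resize_image step configured with method: BICUBIC resolves
# through this map to tf.image.ResizeMethod.BICUBIC before build() below hands
# it to preprocessor.resize_image.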

def build(preprocessor_step_config):
  """Builds preprocessing step based on the configuration.

  Args:
    preprocessor_step_config: PreprocessingStep configuration proto.

  Returns:
    function, argmap: A callable function and an argument map to call function
      with.

  Raises:
    ValueError: On invalid configuration.
  """
  step_type = preprocessor_step_config.WhichOneof('preprocessing_step')

  if step_type in PREPROCESSING_FUNCTION_MAP:
    preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
    step_config = _get_step_config_from_proto(preprocessor_step_config,
                                              step_type)
    function_args = _get_dict_from_proto(step_config)
    return (preprocessing_function, function_args)
  if step_type == 'random_crop_image':
    config = preprocessor_step_config.random_crop_image
    return (preprocessor.random_crop_image,
            {
                'min_object_covered': config.min_object_covered,
                'aspect_ratio_range': (config.min_aspect_ratio,
                                       config.max_aspect_ratio),
                'area_range': (config.min_area, config.max_area),
                'overlap_thresh': config.overlap_thresh,
                'random_coef': config.random_coef,
            })
  if step_type == 'random_pad_image':
    config = preprocessor_step_config.random_pad_image
    min_image_size = None
    if (config.HasField('min_image_height') !=
        config.HasField('min_image_width')):
      raise ValueError('min_image_height and min_image_width should be either '
                       'both set or both unset.')
    if config.HasField('min_image_height'):
      min_image_size = (config.min_image_height, config.min_image_width)

    max_image_size = None
    if (config.HasField('max_image_height') !=
        config.HasField('max_image_width')):
      raise ValueError('max_image_height and max_image_width should be either '
                       'both set or both unset.')
    if config.HasField('max_image_height'):
      max_image_size = (config.max_image_height, config.max_image_width)

    pad_color = config.pad_color
    if pad_color and len(pad_color) != 3:
      raise ValueError('pad_color should have 3 elements (RGB) if set!')
    if not pad_color:
      pad_color = None
    return (preprocessor.random_pad_image,
            {
                'min_image_size': min_image_size,
                'max_image_size': max_image_size,
                'pad_color': pad_color,
            })
  if step_type == 'random_crop_pad_image':
    config = preprocessor_step_config.random_crop_pad_image
    min_padded_size_ratio = config.min_padded_size_ratio
    if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
      raise ValueError('min_padded_size_ratio should have 2 elements if set!')
    max_padded_size_ratio = config.max_padded_size_ratio
    if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
      raise ValueError('max_padded_size_ratio should have 2 elements if set!')
    pad_color = config.pad_color
    if pad_color and len(pad_color) != 3:
      raise ValueError('pad_color should have 3 elements (RGB) if set!')
    return (preprocessor.random_crop_pad_image,
            {
                'min_object_covered': config.min_object_covered,
                'aspect_ratio_range': (config.min_aspect_ratio,
                                       config.max_aspect_ratio),
                'area_range': (config.min_area, config.max_area),
                'overlap_thresh': config.overlap_thresh,
                'random_coef': config.random_coef,
                'min_padded_size_ratio': (min_padded_size_ratio
                                          if min_padded_size_ratio else None),
                'max_padded_size_ratio': (max_padded_size_ratio
                                          if max_padded_size_ratio else None),
                'pad_color': (pad_color if pad_color else None),
            })
  if step_type == 'random_resize_method':
    config = preprocessor_step_config.random_resize_method
    return (preprocessor.random_resize_method,
            {
                'target_size': [config.target_height, config.target_width],
            })

  if step_type == 'resize_image':
    config = preprocessor_step_config.resize_image
    method = RESIZE_METHOD_MAP[config.method]
    return (preprocessor.resize_image,
            {
                'new_height': config.new_height,
                'new_width': config.new_width,
                'method': method
            })
  if step_type == 'ssd_random_crop':
    config = preprocessor_step_config.ssd_random_crop
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
                            for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      return (preprocessor.ssd_random_crop,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio_range': aspect_ratio_range,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'random_coef': random_coef,
              })
    return (preprocessor.ssd_random_crop, {})
  if step_type == 'ssd_random_crop_pad':
    config = preprocessor_step_config.ssd_random_crop_pad
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
                            for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      min_padded_size_ratio = [(op.min_padded_size_ratio[0],
                                op.min_padded_size_ratio[1])
                               for op in config.operations]
      max_padded_size_ratio = [(op.max_padded_size_ratio[0],
                                op.max_padded_size_ratio[1])
                               for op in config.operations]
      pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b)
                   for op in config.operations]
      return (preprocessor.ssd_random_crop_pad,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio_range': aspect_ratio_range,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'random_coef': random_coef,
                  'min_padded_size_ratio': min_padded_size_ratio,
                  'max_padded_size_ratio': max_padded_size_ratio,
                  'pad_color': pad_color,
              })
    return (preprocessor.ssd_random_crop_pad, {})
  if step_type == 'ssd_random_crop_fixed_aspect_ratio':
    config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      return (preprocessor.ssd_random_crop_fixed_aspect_ratio,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio': config.aspect_ratio,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'random_coef': random_coef,
              })
    return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})

  raise ValueError('Unknown preprocessing step.')
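A minimal usage sketch of this builder, assuming the object_detection protos
are compiled and importable; the chosen step is illustrative:

from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2

step = preprocessor_pb2.PreprocessingStep()
text_format.Merge('random_horizontal_flip {}', step)

# build() returns the preprocessing function plus its keyword arguments;
# pairs like this are typically handed to preprocessor.preprocess.
fn, args = preprocessor_builder.build(step)
assert fn is preprocessor.random_horizontal_flip
assert args == {}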