ModelZoo / ResNet50_tensorflow / Commits

Commit f282f6ef
Authored Jul 05, 2017 by Alexander Gorban

Merge branch 'master' of github.com:tensorflow/models

Parents: 58a5da7b, a2970b03
Changes: 302 files; showing 20 changed files with 2939 additions and 0 deletions (+2939, -0).
object_detection/box_coders/mean_stddev_box_coder_test.py     +58   -0
object_detection/box_coders/square_box_coder.py               +126  -0
object_detection/box_coders/square_box_coder_test.py          +97   -0
object_detection/builders/BUILD                                +296  -0
object_detection/builders/__init__.py                          +0    -0
object_detection/builders/anchor_generator_builder.py          +66   -0
object_detection/builders/anchor_generator_builder_test.py     +194  -0
object_detection/builders/box_coder_builder.py                 +55   -0
object_detection/builders/box_coder_builder_test.py            +107  -0
object_detection/builders/box_predictor_builder.py             +106  -0
object_detection/builders/box_predictor_builder_test.py        +391  -0
object_detection/builders/hyperparams_builder.py               +169  -0
object_detection/builders/hyperparams_builder_test.py          +450  -0
object_detection/builders/image_resizer_builder.py             +62   -0
object_detection/builders/image_resizer_builder_test.py        +70   -0
object_detection/builders/input_reader_builder.py              +65   -0
object_detection/builders/input_reader_builder_test.py         +92   -0
object_detection/builders/losses_builder.py                    +161  -0
object_detection/builders/losses_builder_test.py               +323  -0
object_detection/builders/matcher_builder.py                   +51   -0
Too many changes to show: to preserve performance, only 302 of 302+ files are displayed.
object_detection/box_coders/mean_stddev_box_coder_test.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.mean_stddev_boxcoder."""
import tensorflow as tf

from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list


class MeanStddevBoxCoderTest(tf.test.TestCase):

  def testGetCorrectRelativeCodesAfterEncoding(self):
    box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
    boxes = box_list.BoxList(tf.constant(box_corners))
    expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]]
    prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]])
    prior_stddevs = tf.constant(2 * [4 * [.1]])
    priors = box_list.BoxList(prior_means)
    priors.add_field('stddev', prior_stddevs)

    coder = mean_stddev_box_coder.MeanStddevBoxCoder()
    rel_codes = coder.encode(boxes, priors)
    with self.test_session() as sess:
      rel_codes_out = sess.run(rel_codes)
      self.assertAllClose(rel_codes_out, expected_rel_codes)

  def testGetCorrectBoxesAfterDecoding(self):
    rel_codes = tf.constant([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]])
    expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
    prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]])
    prior_stddevs = tf.constant(2 * [4 * [.1]])
    priors = box_list.BoxList(prior_means)
    priors.add_field('stddev', prior_stddevs)

    coder = mean_stddev_box_coder.MeanStddevBoxCoder()
    decoded_boxes = coder.decode(rel_codes, priors)
    decoded_box_corners = decoded_boxes.get()
    with self.test_session() as sess:
      decoded_out = sess.run(decoded_box_corners)
      self.assertAllClose(decoded_out, expected_box_corners)


if __name__ == '__main__':
  tf.test.main()
object_detection/box_coders/square_box_coder.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Square box coder.
Square box coder follows the coding schema described below:
l = sqrt(h * w)
la = sqrt(ha * wa)
ty = (y - ya) / la
tx = (x - xa) / la
tl = log(l / la)
where x, y, w, h denote the box's center coordinates, width, and height,
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tl denote the anchor-encoded
center, and length, respectively. Because the encoded box is a square, only
one length is encoded.
This has shown to provide performance improvements over the Faster RCNN box
coder when the objects being detected tend to be square (e.g. faces) and when
the input images are not distorted via resizing.
"""
import tensorflow as tf

from object_detection.core import box_coder
from object_detection.core import box_list

EPSILON = 1e-8


class SquareBoxCoder(box_coder.BoxCoder):
  """Encodes a 3-scalar representation of a square box."""

  def __init__(self, scale_factors=None):
    """Constructor for SquareBoxCoder.

    Args:
      scale_factors: List of 3 positive scalars to scale ty, tx, and tl.
        If set to None, does not perform scaling. For faster RCNN,
        the open-source implementation recommends using [10.0, 10.0, 5.0].

    Raises:
      ValueError: If scale_factors is not length 3 or contains values less than
        or equal to 0.
    """
    if scale_factors:
      if len(scale_factors) != 3:
        raise ValueError('The argument scale_factors must be a list of length '
                         '3.')
      if any(scalar <= 0 for scalar in scale_factors):
        raise ValueError('The values in scale_factors must all be greater '
                         'than 0.')
    self._scale_factors = scale_factors

  @property
  def code_size(self):
    return 3

  def _encode(self, boxes, anchors):
    """Encodes a box collection with respect to an anchor collection.

    Args:
      boxes: BoxList holding N boxes to be encoded.
      anchors: BoxList of anchors.

    Returns:
      a tensor representing N anchor-encoded boxes of the format
      [ty, tx, tl].
    """
    # Convert anchors to the center coordinate representation.
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    la = tf.sqrt(ha * wa)
    ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
    l = tf.sqrt(h * w)
    # Avoid NaN in division and log below.
    la += EPSILON
    l += EPSILON

    tx = (xcenter - xcenter_a) / la
    ty = (ycenter - ycenter_a) / la
    tl = tf.log(l / la)
    # Scales location targets for joint training.
    if self._scale_factors:
      ty *= self._scale_factors[0]
      tx *= self._scale_factors[1]
      tl *= self._scale_factors[2]
    return tf.transpose(tf.stack([ty, tx, tl]))

  def _decode(self, rel_codes, anchors):
    """Decodes relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    la = tf.sqrt(ha * wa)

    ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      tl /= self._scale_factors[2]
    l = tf.exp(tl) * la
    ycenter = ty * la + ycenter_a
    xcenter = tx * la + xcenter_a
    ymin = ycenter - l / 2.
    xmin = xcenter - l / 2.
    ymax = ycenter + l / 2.
    xmax = xcenter + l / 2.
    return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
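As a sanity check on the coding schema described in the module docstring, the first expected row in the test file that follows can be reproduced with plain Python math. This is a minimal standalone sketch; the box and anchor values are copied from test_correct_relative_codes_with_default_scale below.

import math

# Box and anchor from the first test case, both in [ymin, xmin, ymax, xmax] form.
ymin, xmin, ymax, xmax = 10.0, 10.0, 20.0, 15.0          # box
ymin_a, xmin_a, ymax_a, xmax_a = 15.0, 12.0, 30.0, 18.0  # anchor

# Center/size representation.
y, x, h, w = (ymin + ymax) / 2, (xmin + xmax) / 2, ymax - ymin, xmax - xmin
ya, xa = (ymin_a + ymax_a) / 2, (xmin_a + xmax_a) / 2
ha, wa = ymax_a - ymin_a, xmax_a - xmin_a

# Square-box coding schema from the docstring: a single side length per box.
l, la = math.sqrt(h * w), math.sqrt(ha * wa)
ty, tx, tl = (y - ya) / la, (x - xa) / la, math.log(l / la)
print(ty, tx, tl)  # approx (-0.790569, -0.263523, -0.293893), matching the test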
object_detection/box_coders/square_box_coder_test.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.square_box_coder."""
import tensorflow as tf

from object_detection.box_coders import square_box_coder
from object_detection.core import box_list


class SquareBoxCoderTest(tf.test.TestCase):

  def test_correct_relative_codes_with_default_scale(self):
    boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
    anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
    scale_factors = None
    expected_rel_codes = [[-0.790569, -0.263523, -0.293893],
                          [-0.068041, -0.272166, -0.89588]]
    boxes = box_list.BoxList(tf.constant(boxes))
    anchors = box_list.BoxList(tf.constant(anchors))
    coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
    rel_codes = coder.encode(boxes, anchors)
    with self.test_session() as sess:
      (rel_codes_out,) = sess.run([rel_codes])
      self.assertAllClose(rel_codes_out, expected_rel_codes)

  def test_correct_relative_codes_with_non_default_scale(self):
    boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
    anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
    scale_factors = [2, 3, 4]
    expected_rel_codes = [[-1.581139, -0.790569, -1.175573],
                          [-0.136083, -0.816497, -3.583519]]
    boxes = box_list.BoxList(tf.constant(boxes))
    anchors = box_list.BoxList(tf.constant(anchors))
    coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
    rel_codes = coder.encode(boxes, anchors)
    with self.test_session() as sess:
      (rel_codes_out,) = sess.run([rel_codes])
      self.assertAllClose(rel_codes_out, expected_rel_codes)

  def test_correct_relative_codes_with_small_width(self):
    boxes = [[10.0, 10.0, 10.0000001, 20.0]]
    anchors = [[15.0, 12.0, 30.0, 18.0]]
    scale_factors = None
    expected_rel_codes = [[-1.317616, 0., -20.670586]]
    boxes = box_list.BoxList(tf.constant(boxes))
    anchors = box_list.BoxList(tf.constant(anchors))
    coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
    rel_codes = coder.encode(boxes, anchors)
    with self.test_session() as sess:
      (rel_codes_out,) = sess.run([rel_codes])
      self.assertAllClose(rel_codes_out, expected_rel_codes)

  def test_correct_boxes_with_default_scale(self):
    anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
    rel_codes = [[-0.5, -0.416666, -0.405465],
                 [-0.083333, -0.222222, -0.693147]]
    scale_factors = None
    expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
                      [0.155051, 0.102989, 0.522474, 0.470412]]
    anchors = box_list.BoxList(tf.constant(anchors))
    coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
    boxes = coder.decode(rel_codes, anchors)
    with self.test_session() as sess:
      (boxes_out,) = sess.run([boxes.get()])
      self.assertAllClose(boxes_out, expected_boxes)

  def test_correct_boxes_with_non_default_scale(self):
    anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
    rel_codes = [[-1., -1.25, -1.62186],
                 [-0.166667, -0.666667, -2.772588]]
    scale_factors = [2, 3, 4]
    expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
                      [0.155051, 0.102989, 0.522474, 0.470412]]
    anchors = box_list.BoxList(tf.constant(anchors))
    coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
    boxes = coder.decode(rel_codes, anchors)
    with self.test_session() as sess:
      (boxes_out,) = sess.run([boxes.get()])
      self.assertAllClose(boxes_out, expected_boxes)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/BUILD (new file, 0 → 100644)
# Tensorflow Object Detection API: component builders.
package(
    default_visibility = ["//visibility:public"],
)

licenses(["notice"])  # Apache 2.0

py_library(
    name = "model_builder",
    srcs = ["model_builder.py"],
    deps = [
        ":anchor_generator_builder",
        ":box_coder_builder",
        ":box_predictor_builder",
        ":hyperparams_builder",
        ":image_resizer_builder",
        ":losses_builder",
        ":matcher_builder",
        ":post_processing_builder",
        ":region_similarity_calculator_builder",
        "//tensorflow_models/object_detection/core:box_predictor",
        "//tensorflow_models/object_detection/meta_architectures:faster_rcnn_meta_arch",
        "//tensorflow_models/object_detection/meta_architectures:rfcn_meta_arch",
        "//tensorflow_models/object_detection/meta_architectures:ssd_meta_arch",
        "//tensorflow_models/object_detection/models:faster_rcnn_inception_resnet_v2_feature_extractor",
        "//tensorflow_models/object_detection/models:faster_rcnn_resnet_v1_feature_extractor",
        "//tensorflow_models/object_detection/models:ssd_inception_v2_feature_extractor",
        "//tensorflow_models/object_detection/models:ssd_mobilenet_v1_feature_extractor",
        "//tensorflow_models/object_detection/protos:model_py_pb2",
    ],
)

py_test(
    name = "model_builder_test",
    srcs = ["model_builder_test.py"],
    deps = [
        ":model_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/meta_architectures:faster_rcnn_meta_arch",
        "//tensorflow_models/object_detection/meta_architectures:ssd_meta_arch",
        "//tensorflow_models/object_detection/models:ssd_inception_v2_feature_extractor",
        "//tensorflow_models/object_detection/models:ssd_mobilenet_v1_feature_extractor",
        "//tensorflow_models/object_detection/protos:model_py_pb2",
    ],
)

py_library(
    name = "matcher_builder",
    srcs = ["matcher_builder.py"],
    deps = [
        "//tensorflow_models/object_detection/matchers:argmax_matcher",
        "//tensorflow_models/object_detection/matchers:bipartite_matcher",
        "//tensorflow_models/object_detection/protos:matcher_py_pb2",
    ],
)

py_test(
    name = "matcher_builder_test",
    srcs = ["matcher_builder_test.py"],
    deps = [
        ":matcher_builder",
        "//tensorflow_models/object_detection/matchers:argmax_matcher",
        "//tensorflow_models/object_detection/matchers:bipartite_matcher",
        "//tensorflow_models/object_detection/protos:matcher_py_pb2",
    ],
)

py_library(
    name = "box_coder_builder",
    srcs = ["box_coder_builder.py"],
    deps = [
        "//tensorflow_models/object_detection/box_coders:faster_rcnn_box_coder",
        "//tensorflow_models/object_detection/box_coders:mean_stddev_box_coder",
        "//tensorflow_models/object_detection/box_coders:square_box_coder",
        "//tensorflow_models/object_detection/protos:box_coder_py_pb2",
    ],
)

py_test(
    name = "box_coder_builder_test",
    srcs = ["box_coder_builder_test.py"],
    deps = [
        ":box_coder_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/box_coders:faster_rcnn_box_coder",
        "//tensorflow_models/object_detection/box_coders:mean_stddev_box_coder",
        "//tensorflow_models/object_detection/box_coders:square_box_coder",
        "//tensorflow_models/object_detection/protos:box_coder_py_pb2",
    ],
)

py_library(
    name = "anchor_generator_builder",
    srcs = ["anchor_generator_builder.py"],
    deps = [
        "//tensorflow_models/object_detection/anchor_generators:grid_anchor_generator",
        "//tensorflow_models/object_detection/anchor_generators:multiple_grid_anchor_generator",
        "//tensorflow_models/object_detection/protos:anchor_generator_py_pb2",
    ],
)

py_test(
    name = "anchor_generator_builder_test",
    srcs = ["anchor_generator_builder_test.py"],
    deps = [
        ":anchor_generator_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/anchor_generators:grid_anchor_generator",
        "//tensorflow_models/object_detection/anchor_generators:multiple_grid_anchor_generator",
        "//tensorflow_models/object_detection/protos:anchor_generator_py_pb2",
    ],
)

py_library(
    name = "input_reader_builder",
    srcs = ["input_reader_builder.py"],
    deps = [
        "//tensorflow",
        "//tensorflow_models/object_detection/data_decoders:tf_example_decoder",
        "//tensorflow_models/object_detection/protos:input_reader_py_pb2",
    ],
)

py_test(
    name = "input_reader_builder_test",
    srcs = [
        "input_reader_builder_test.py",
    ],
    deps = [
        ":input_reader_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/core:standard_fields",
        "//tensorflow_models/object_detection/protos:input_reader_py_pb2",
    ],
)

py_library(
    name = "losses_builder",
    srcs = ["losses_builder.py"],
    deps = [
        "//tensorflow_models/object_detection/core:losses",
        "//tensorflow_models/object_detection/protos:losses_py_pb2",
    ],
)

py_test(
    name = "losses_builder_test",
    srcs = ["losses_builder_test.py"],
    deps = [
        ":losses_builder",
        "//tensorflow_models/object_detection/core:losses",
        "//tensorflow_models/object_detection/protos:losses_py_pb2",
    ],
)

py_library(
    name = "optimizer_builder",
    srcs = ["optimizer_builder.py"],
    deps = [
        "//tensorflow",
        "//tensorflow_models/object_detection/utils:learning_schedules",
    ],
)

py_test(
    name = "optimizer_builder_test",
    srcs = ["optimizer_builder_test.py"],
    deps = [
        ":optimizer_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/protos:optimizer_py_pb2",
    ],
)

py_library(
    name = "post_processing_builder",
    srcs = ["post_processing_builder.py"],
    deps = [
        "//tensorflow",
        "//tensorflow_models/object_detection/core:post_processing",
        "//tensorflow_models/object_detection/protos:post_processing_py_pb2",
    ],
)

py_test(
    name = "post_processing_builder_test",
    srcs = ["post_processing_builder_test.py"],
    deps = [
        ":post_processing_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/protos:post_processing_py_pb2",
    ],
)

py_library(
    name = "hyperparams_builder",
    srcs = ["hyperparams_builder.py"],
    deps = [
        "//tensorflow_models/object_detection/protos:hyperparams_py_pb2",
    ],
)

py_test(
    name = "hyperparams_builder_test",
    srcs = ["hyperparams_builder_test.py"],
    deps = [
        ":hyperparams_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/protos:hyperparams_py_pb2",
    ],
)

py_library(
    name = "box_predictor_builder",
    srcs = ["box_predictor_builder.py"],
    deps = [
        ":hyperparams_builder",
        "//tensorflow_models/object_detection/core:box_predictor",
        "//tensorflow_models/object_detection/protos:box_predictor_py_pb2",
    ],
)

py_test(
    name = "box_predictor_builder_test",
    srcs = ["box_predictor_builder_test.py"],
    deps = [
        ":box_predictor_builder",
        ":hyperparams_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/protos:box_predictor_py_pb2",
        "//tensorflow_models/object_detection/protos:hyperparams_py_pb2",
    ],
)

py_library(
    name = "region_similarity_calculator_builder",
    srcs = ["region_similarity_calculator_builder.py"],
    deps = [
        "//tensorflow_models/object_detection/core:region_similarity_calculator",
        "//tensorflow_models/object_detection/protos:region_similarity_calculator_py_pb2",
    ],
)

py_test(
    name = "region_similarity_calculator_builder_test",
    srcs = ["region_similarity_calculator_builder_test.py"],
    deps = [
        ":region_similarity_calculator_builder",
        "//tensorflow",
    ],
)

py_library(
    name = "preprocessor_builder",
    srcs = ["preprocessor_builder.py"],
    deps = [
        "//tensorflow",
        "//tensorflow_models/object_detection/core:preprocessor",
        "//tensorflow_models/object_detection/protos:preprocessor_py_pb2",
    ],
)

py_test(
    name = "preprocessor_builder_test",
    srcs = [
        "preprocessor_builder_test.py",
    ],
    deps = [
        ":preprocessor_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/core:preprocessor",
        "//tensorflow_models/object_detection/protos:preprocessor_py_pb2",
    ],
)

py_library(
    name = "image_resizer_builder",
    srcs = ["image_resizer_builder.py"],
    deps = [
        "//tensorflow",
        "//tensorflow_models/object_detection/core:preprocessor",
        "//tensorflow_models/object_detection/protos:image_resizer_py_pb2",
    ],
)

py_test(
    name = "image_resizer_builder_test",
    srcs = ["image_resizer_builder_test.py"],
    deps = [
        ":image_resizer_builder",
        "//tensorflow",
        "//tensorflow_models/object_detection/protos:image_resizer_py_pb2",
    ],
)
object_detection/builders/__init__.py (new empty file, 0 → 100644)
object_detection/builders/anchor_generator_builder.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection anchor generator from config."""
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.protos import anchor_generator_pb2


def build(anchor_generator_config):
  """Builds an anchor generator based on the config.

  Args:
    anchor_generator_config: An anchor_generator.proto object containing the
      config for the desired anchor generator.

  Returns:
    Anchor generator based on the config.

  Raises:
    ValueError: On empty anchor generator proto.
  """
  if not isinstance(anchor_generator_config,
                    anchor_generator_pb2.AnchorGenerator):
    raise ValueError('anchor_generator_config not of type '
                     'anchor_generator_pb2.AnchorGenerator')
  if anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'grid_anchor_generator':
    grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator
    return grid_anchor_generator.GridAnchorGenerator(
        scales=[float(scale) for scale
                in grid_anchor_generator_config.scales],
        aspect_ratios=[float(aspect_ratio) for aspect_ratio
                       in grid_anchor_generator_config.aspect_ratios],
        base_anchor_size=[grid_anchor_generator_config.height,
                          grid_anchor_generator_config.width],
        anchor_stride=[grid_anchor_generator_config.height_stride,
                       grid_anchor_generator_config.width_stride],
        anchor_offset=[grid_anchor_generator_config.height_offset,
                       grid_anchor_generator_config.width_offset])
  elif anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'ssd_anchor_generator':
    ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator
    return multiple_grid_anchor_generator.create_ssd_anchors(
        num_layers=ssd_anchor_generator_config.num_layers,
        min_scale=ssd_anchor_generator_config.min_scale,
        max_scale=ssd_anchor_generator_config.max_scale,
        aspect_ratios=ssd_anchor_generator_config.aspect_ratios,
        reduce_boxes_in_lowest_layer=(
            ssd_anchor_generator_config.reduce_boxes_in_lowest_layer))
  else:
    raise ValueError('Empty anchor generator.')
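A minimal usage sketch for this builder (it assumes the compiled protos under object_detection/protos are importable; the test file below exercises the same pattern more thoroughly):

from google.protobuf import text_format

from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2

# Configure an SSD-style anchor generator from a text proto.
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge('ssd_anchor_generator { aspect_ratios: [1.0] }',
                  anchor_generator_proto)
# build() dispatches on the oneof and returns a MultipleGridAnchorGenerator here.
anchor_generator = anchor_generator_builder.build(anchor_generator_proto)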
object_detection/builders/anchor_generator_builder_test.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generator_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2


class AnchorGeneratorBuilderTest(tf.test.TestCase):

  def assert_almost_list_equal(self, expected_list, actual_list, delta=None):
    self.assertEqual(len(expected_list), len(actual_list))
    for expected_item, actual_item in zip(expected_list, actual_list):
      self.assertAlmostEqual(expected_item, actual_item, delta=delta)

  def test_build_grid_anchor_generator_with_defaults(self):
    anchor_generator_text_proto = """
      grid_anchor_generator {
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               grid_anchor_generator.GridAnchorGenerator))
    self.assertListEqual(anchor_generator_object._scales, [])
    self.assertListEqual(anchor_generator_object._aspect_ratios, [])
    with self.test_session() as sess:
      base_anchor_size, anchor_offset, anchor_stride = sess.run(
          [anchor_generator_object._base_anchor_size,
           anchor_generator_object._anchor_offset,
           anchor_generator_object._anchor_stride])
    self.assertAllEqual(anchor_offset, [0, 0])
    self.assertAllEqual(anchor_stride, [16, 16])
    self.assertAllEqual(base_anchor_size, [256, 256])

  def test_build_grid_anchor_generator_with_non_default_parameters(self):
    anchor_generator_text_proto = """
      grid_anchor_generator {
        height: 128
        width: 512
        height_stride: 10
        width_stride: 20
        height_offset: 30
        width_offset: 40
        scales: [0.4, 2.2]
        aspect_ratios: [0.3, 4.5]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               grid_anchor_generator.GridAnchorGenerator))
    self.assert_almost_list_equal(anchor_generator_object._scales,
                                  [0.4, 2.2])
    self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
                                  [0.3, 4.5])
    with self.test_session() as sess:
      base_anchor_size, anchor_offset, anchor_stride = sess.run(
          [anchor_generator_object._base_anchor_size,
           anchor_generator_object._anchor_offset,
           anchor_generator_object._anchor_stride])
    self.assertAllEqual(anchor_offset, [30, 40])
    self.assertAllEqual(anchor_stride, [10, 20])
    self.assertAllEqual(base_anchor_size, [128, 512])

  def test_build_ssd_anchor_generator_with_defaults(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               multiple_grid_anchor_generator.
                               MultipleGridAnchorGenerator))
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, 0.2, 0.2),
         (0.35, 0.418),
         (0.499, 0.570),
         (0.649, 0.721),
         (0.799, 0.871),
         (0.949, 0.974)]):
      self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])

  def test_build_ssd_anchor_generator_withoud_reduced_boxes(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
        reduce_boxes_in_lowest_layer: false
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               multiple_grid_anchor_generator.
                               MultipleGridAnchorGenerator))
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.2, 0.264),
         (0.35, 0.418),
         (0.499, 0.570),
         (0.649, 0.721),
         (0.799, 0.871),
         (0.949, 0.974)]):
      self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        6 * [(1.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])

  def test_build_ssd_anchor_generator_with_non_default_parameters(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        num_layers: 2
        min_scale: 0.3
        max_scale: 0.8
        aspect_ratios: [2.0]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               multiple_grid_anchor_generator.
                               MultipleGridAnchorGenerator))
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, 0.3, 0.3), (0.8,)]):
      self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        [(1.0, 2.0, 0.5), (2.0,)]):
      self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])

  def test_raise_value_error_on_empty_anchor_genertor(self):
    anchor_generator_text_proto = """
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    with self.assertRaises(ValueError):
      anchor_generator_builder.build(anchor_generator_proto)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/box_coder_builder.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.protos import box_coder_pb2


def build(box_coder_config):
  """Builds a box coder object based on the box coder config.

  Args:
    box_coder_config: A box_coder.proto object containing the config for the
      desired box coder.

  Returns:
    BoxCoder based on the config.

  Raises:
    ValueError: On empty box coder proto.
  """
  if not isinstance(box_coder_config, box_coder_pb2.BoxCoder):
    raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.')

  if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder':
    return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[
        box_coder_config.faster_rcnn_box_coder.y_scale,
        box_coder_config.faster_rcnn_box_coder.x_scale,
        box_coder_config.faster_rcnn_box_coder.height_scale,
        box_coder_config.faster_rcnn_box_coder.width_scale
    ])
  if (box_coder_config.WhichOneof('box_coder_oneof') ==
      'mean_stddev_box_coder'):
    return mean_stddev_box_coder.MeanStddevBoxCoder()
  if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder':
    return square_box_coder.SquareBoxCoder(scale_factors=[
        box_coder_config.square_box_coder.y_scale,
        box_coder_config.square_box_coder.x_scale,
        box_coder_config.square_box_coder.length_scale
    ])
  raise ValueError('Empty box coder.')
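The box coder builder follows the same oneof-dispatch pattern. A minimal sketch, under the same assumptions as the anchor generator example above and mirroring the defaults test below:

from google.protobuf import text_format

from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2

box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge('square_box_coder { }', box_coder_proto)
# Returns a SquareBoxCoder; per the test below, the proto defaults yield
# scale factors of [10.0, 10.0, 5.0].
box_coder = box_coder_builder.build(box_coder_proto)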
object_detection/builders/box_coder_builder_test.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_coder_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2


class BoxCoderBuilderTest(tf.test.TestCase):

  def test_build_faster_rcnn_box_coder_with_defaults(self):
    box_coder_text_proto = """
      faster_rcnn_box_coder {
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(isinstance(box_coder_object,
                               faster_rcnn_box_coder.FasterRcnnBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])

  def test_build_faster_rcnn_box_coder_with_non_default_parameters(self):
    box_coder_text_proto = """
      faster_rcnn_box_coder {
        y_scale: 6.0
        x_scale: 3.0
        height_scale: 7.0
        width_scale: 8.0
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(isinstance(box_coder_object,
                               faster_rcnn_box_coder.FasterRcnnBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])

  def test_build_mean_stddev_box_coder(self):
    box_coder_text_proto = """
      mean_stddev_box_coder {
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(
        isinstance(box_coder_object,
                   mean_stddev_box_coder.MeanStddevBoxCoder))

  def test_build_square_box_coder_with_defaults(self):
    box_coder_text_proto = """
      square_box_coder {
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(
        isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0])

  def test_build_square_box_coder_with_non_default_parameters(self):
    box_coder_text_proto = """
      square_box_coder {
        y_scale: 6.0
        x_scale: 3.0
        length_scale: 7.0
      }
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertTrue(
        isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
    self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0])

  def test_raise_error_on_empty_box_coder(self):
    box_coder_text_proto = """
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    with self.assertRaises(ValueError):
      box_coder_builder.build(box_coder_proto)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/box_predictor_builder.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
from object_detection.core import box_predictor
from object_detection.protos import box_predictor_pb2


def build(argscope_fn, box_predictor_config, is_training, num_classes):
  """Builds box predictor based on the configuration.

  Builds box predictor based on the configuration. See box_predictor.proto for
  configurable options. Also, see box_predictor.py for more details.

  Args:
    argscope_fn: A function that takes the following inputs:
        * hyperparams_pb2.Hyperparams proto
        * a boolean indicating if the model is in training mode.
      and returns a tf slim argscope for Conv and FC hyperparameters.
    box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
      configuration.
    is_training: Whether the models is in training mode.
    num_classes: Number of classes to predict.

  Returns:
    box_predictor: box_predictor.BoxPredictor object.

  Raises:
    ValueError: On unknown box predictor.
  """
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')

  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')

  if box_predictor_oneof == 'convolutional_box_predictor':
    conv_box_predictor = box_predictor_config.convolutional_box_predictor
    conv_hyperparams = argscope_fn(conv_box_predictor.conv_hyperparams,
                                   is_training)
    box_predictor_object = box_predictor.ConvolutionalBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        min_depth=conv_box_predictor.min_depth,
        max_depth=conv_box_predictor.max_depth,
        num_layers_before_predictor=(
            conv_box_predictor.num_layers_before_predictor),
        use_dropout=conv_box_predictor.use_dropout,
        dropout_keep_prob=conv_box_predictor.dropout_keep_probability,
        kernel_size=conv_box_predictor.kernel_size,
        box_code_size=conv_box_predictor.box_code_size,
        apply_sigmoid_to_scores=conv_box_predictor.apply_sigmoid_to_scores)
    return box_predictor_object

  if box_predictor_oneof == 'mask_rcnn_box_predictor':
    mask_rcnn_box_predictor = box_predictor_config.mask_rcnn_box_predictor
    fc_hyperparams = argscope_fn(mask_rcnn_box_predictor.fc_hyperparams,
                                 is_training)
    conv_hyperparams = None
    if mask_rcnn_box_predictor.HasField('conv_hyperparams'):
      conv_hyperparams = argscope_fn(mask_rcnn_box_predictor.conv_hyperparams,
                                     is_training)
    box_predictor_object = box_predictor.MaskRCNNBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        fc_hyperparams=fc_hyperparams,
        use_dropout=mask_rcnn_box_predictor.use_dropout,
        dropout_keep_prob=mask_rcnn_box_predictor.dropout_keep_probability,
        box_code_size=mask_rcnn_box_predictor.box_code_size,
        conv_hyperparams=conv_hyperparams,
        predict_instance_masks=mask_rcnn_box_predictor.predict_instance_masks,
        mask_prediction_conv_depth=(
            mask_rcnn_box_predictor.mask_prediction_conv_depth),
        predict_keypoints=mask_rcnn_box_predictor.predict_keypoints)
    return box_predictor_object

  if box_predictor_oneof == 'rfcn_box_predictor':
    rfcn_box_predictor = box_predictor_config.rfcn_box_predictor
    conv_hyperparams = argscope_fn(rfcn_box_predictor.conv_hyperparams,
                                   is_training)
    box_predictor_object = box_predictor.RfcnBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        crop_size=[rfcn_box_predictor.crop_height,
                   rfcn_box_predictor.crop_width],
        num_spatial_bins=[rfcn_box_predictor.num_spatial_bins_height,
                          rfcn_box_predictor.num_spatial_bins_width],
        depth=rfcn_box_predictor.depth,
        box_code_size=rfcn_box_predictor.box_code_size)
    return box_predictor_object

  raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
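The argscope_fn argument is simply a callable that takes a hyperparams_pb2.Hyperparams proto and an is_training flag and returns a tf-slim arg_scope; within this package it is normally hyperparams_builder.build. A minimal sketch mirroring test_construct_default_conv_box_predictor below (it assumes TF 1.x with tf.contrib.slim and the compiled protos available):

from google.protobuf import text_format

from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2

box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge("""
  convolutional_box_predictor {
    conv_hyperparams {
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
    }
  }""", box_predictor_proto)
# hyperparams_builder.build satisfies the argscope_fn contract.
predictor = box_predictor_builder.build(
    argscope_fn=hyperparams_builder.build,
    box_predictor_config=box_predictor_proto,
    is_training=True,
    num_classes=90)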
object_detection/builders/box_predictor_builder_test.py (new file, 0 → 100644)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import
mock
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
object_detection.builders
import
box_predictor_builder
from
object_detection.builders
import
hyperparams_builder
from
object_detection.protos
import
box_predictor_pb2
from
object_detection.protos
import
hyperparams_pb2
class
ConvolutionalBoxPredictorBuilderTest
(
tf
.
test
.
TestCase
):
def
test_box_predictor_calls_conv_argscope_fn
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
hyperparams_proto
)
def
mock_conv_argscope_builder
(
conv_hyperparams_arg
,
is_training
):
return
(
conv_hyperparams_arg
,
is_training
)
box_predictor_proto
=
box_predictor_pb2
.
BoxPredictor
()
box_predictor_proto
.
convolutional_box_predictor
.
conv_hyperparams
.
CopyFrom
(
hyperparams_proto
)
box_predictor
=
box_predictor_builder
.
build
(
argscope_fn
=
mock_conv_argscope_builder
,
box_predictor_config
=
box_predictor_proto
,
is_training
=
False
,
num_classes
=
10
)
(
conv_hyperparams_actual
,
is_training
)
=
box_predictor
.
_conv_hyperparams
self
.
assertAlmostEqual
((
hyperparams_proto
.
regularizer
.
l1_regularizer
.
weight
),
(
conv_hyperparams_actual
.
regularizer
.
l1_regularizer
.
weight
))
self
.
assertAlmostEqual
((
hyperparams_proto
.
initializer
.
truncated_normal_initializer
.
stddev
),
(
conv_hyperparams_actual
.
initializer
.
truncated_normal_initializer
.
stddev
))
self
.
assertAlmostEqual
((
hyperparams_proto
.
initializer
.
truncated_normal_initializer
.
mean
),
(
conv_hyperparams_actual
.
initializer
.
truncated_normal_initializer
.
mean
))
self
.
assertEqual
(
hyperparams_proto
.
activation
,
conv_hyperparams_actual
.
activation
)
self
.
assertFalse
(
is_training
)
def
test_construct_non_default_conv_box_predictor
(
self
):
box_predictor_text_proto
=
"""
convolutional_box_predictor {
min_depth: 2
max_depth: 16
num_layers_before_predictor: 2
use_dropout: false
dropout_keep_probability: 0.4
kernel_size: 3
box_code_size: 3
apply_sigmoid_to_scores: true
}
"""
conv_hyperparams_text_proto
=
"""
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
hyperparams_proto
)
def
mock_conv_argscope_builder
(
conv_hyperparams_arg
,
is_training
):
return
(
conv_hyperparams_arg
,
is_training
)
box_predictor_proto
=
box_predictor_pb2
.
BoxPredictor
()
text_format
.
Merge
(
box_predictor_text_proto
,
box_predictor_proto
)
box_predictor_proto
.
convolutional_box_predictor
.
conv_hyperparams
.
CopyFrom
(
hyperparams_proto
)
box_predictor
=
box_predictor_builder
.
build
(
argscope_fn
=
mock_conv_argscope_builder
,
box_predictor_config
=
box_predictor_proto
,
is_training
=
False
,
num_classes
=
10
)
self
.
assertEqual
(
box_predictor
.
_min_depth
,
2
)
self
.
assertEqual
(
box_predictor
.
_max_depth
,
16
)
self
.
assertEqual
(
box_predictor
.
_num_layers_before_predictor
,
2
)
self
.
assertFalse
(
box_predictor
.
_use_dropout
)
self
.
assertAlmostEqual
(
box_predictor
.
_dropout_keep_prob
,
0.4
)
self
.
assertTrue
(
box_predictor
.
_apply_sigmoid_to_scores
)
self
.
assertEqual
(
box_predictor
.
num_classes
,
10
)
self
.
assertFalse
(
box_predictor
.
_is_training
)
def
test_construct_default_conv_box_predictor
(
self
):
box_predictor_text_proto
=
"""
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto
=
box_predictor_pb2
.
BoxPredictor
()
text_format
.
Merge
(
box_predictor_text_proto
,
box_predictor_proto
)
box_predictor
=
box_predictor_builder
.
build
(
argscope_fn
=
hyperparams_builder
.
build
,
box_predictor_config
=
box_predictor_proto
,
is_training
=
True
,
num_classes
=
90
)
self
.
assertEqual
(
box_predictor
.
_min_depth
,
0
)
self
.
assertEqual
(
box_predictor
.
_max_depth
,
0
)
self
.
assertEqual
(
box_predictor
.
_num_layers_before_predictor
,
0
)
self
.
assertTrue
(
box_predictor
.
_use_dropout
)
self
.
assertAlmostEqual
(
box_predictor
.
_dropout_keep_prob
,
0.8
)
self
.
assertFalse
(
box_predictor
.
_apply_sigmoid_to_scores
)
self
.
assertEqual
(
box_predictor
.
num_classes
,
90
)
self
.
assertTrue
(
box_predictor
.
_is_training
)
class
MaskRCNNBoxPredictorBuilderTest
(
tf
.
test
.
TestCase
):
def
test_box_predictor_builder_calls_fc_argscope_fn
(
self
):
fc_hyperparams_text_proto
=
"""
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
op: FC
"""
hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
fc_hyperparams_text_proto
,
hyperparams_proto
)
box_predictor_proto
=
box_predictor_pb2
.
BoxPredictor
()
box_predictor_proto
.
mask_rcnn_box_predictor
.
fc_hyperparams
.
CopyFrom
(
hyperparams_proto
)
mock_argscope_fn
=
mock
.
Mock
(
return_value
=
'arg_scope'
)
box_predictor
=
box_predictor_builder
.
build
(
argscope_fn
=
mock_argscope_fn
,
box_predictor_config
=
box_predictor_proto
,
is_training
=
False
,
num_classes
=
10
)
mock_argscope_fn
.
assert_called_with
(
hyperparams_proto
,
False
)
self
.
assertEqual
(
box_predictor
.
_fc_hyperparams
,
'arg_scope'
)
def
test_non_default_mask_rcnn_box_predictor
(
self
):
fc_hyperparams_text_proto
=
"""
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
op: FC
"""
box_predictor_text_proto
=
"""
mask_rcnn_box_predictor {
use_dropout: true
dropout_keep_probability: 0.8
box_code_size: 3
}
"""
hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
fc_hyperparams_text_proto
,
hyperparams_proto
)
def
mock_fc_argscope_builder
(
fc_hyperparams_arg
,
is_training
):
return
(
fc_hyperparams_arg
,
is_training
)
box_predictor_proto
=
box_predictor_pb2
.
BoxPredictor
()
text_format
.
Merge
(
box_predictor_text_proto
,
box_predictor_proto
)
box_predictor_proto
.
mask_rcnn_box_predictor
.
fc_hyperparams
.
CopyFrom
(
hyperparams_proto
)
box_predictor
=
box_predictor_builder
.
build
(
argscope_fn
=
mock_fc_argscope_builder
,
box_predictor_config
=
box_predictor_proto
,
is_training
=
True
,
num_classes
=
90
)
self
.
assertTrue
(
box_predictor
.
_use_dropout
)
self
.
assertAlmostEqual
(
box_predictor
.
_dropout_keep_prob
,
0.8
)
self
.
assertEqual
(
box_predictor
.
num_classes
,
90
)
self
.
assertTrue
(
box_predictor
.
_is_training
)
self
.
assertEqual
(
box_predictor
.
_box_code_size
,
3
)
def
test_build_default_mask_rcnn_box_predictor
(
self
):
box_predictor_proto
=
box_predictor_pb2
.
BoxPredictor
()
box_predictor_proto
.
mask_rcnn_box_predictor
.
fc_hyperparams
.
op
=
(
hyperparams_pb2
.
Hyperparams
.
FC
)
box_predictor
=
box_predictor_builder
.
build
(
argscope_fn
=
mock
.
Mock
(
return_value
=
'arg_scope'
),
box_predictor_config
=
box_predictor_proto
,
is_training
=
True
,
num_classes
=
90
)
self
.
assertFalse
(
box_predictor
.
_use_dropout
)
self
.
assertAlmostEqual
(
box_predictor
.
_dropout_keep_prob
,
0.5
)
self
.
assertEqual
(
box_predictor
.
num_classes
,
90
)
self
.
assertTrue
(
box_predictor
.
_is_training
)
self
.
assertEqual
(
box_predictor
.
_box_code_size
,
4
)
self
.
assertFalse
(
box_predictor
.
_predict_instance_masks
)
self
.
assertFalse
(
box_predictor
.
_predict_keypoints
)
def
test_build_box_predictor_with_mask_branch
(
self
):
box_predictor_proto
=
box_predictor_pb2
.
BoxPredictor
()
box_predictor_proto
.
mask_rcnn_box_predictor
.
fc_hyperparams
.
op
=
(
hyperparams_pb2
.
Hyperparams
.
FC
)
box_predictor_proto
.
mask_rcnn_box_predictor
.
conv_hyperparams
.
op
=
(
hyperparams_pb2
.
Hyperparams
.
CONV
)
box_predictor_proto
.
mask_rcnn_box_predictor
.
predict_instance_masks
=
True
box_predictor_proto
.
mask_rcnn_box_predictor
.
mask_prediction_conv_depth
=
512
mock_argscope_fn
=
mock
.
Mock
(
return_value
=
'arg_scope'
)
box_predictor
=
box_predictor_builder
.
build
(
argscope_fn
=
mock_argscope_fn
,
box_predictor_config
=
box_predictor_proto
,
is_training
=
True
,
num_classes
=
90
)
mock_argscope_fn
.
assert_has_calls
(
[
mock
.
call
(
box_predictor_proto
.
mask_rcnn_box_predictor
.
fc_hyperparams
,
True
),
mock
.
call
(
box_predictor_proto
.
mask_rcnn_box_predictor
.
conv_hyperparams
,
True
)],
any_order
=
True
)
self
.
assertFalse
(
box_predictor
.
_use_dropout
)
self
.
assertAlmostEqual
(
box_predictor
.
_dropout_keep_prob
,
0.5
)
self
.
assertEqual
(
box_predictor
.
num_classes
,
90
)
self
.
assertTrue
(
box_predictor
.
_is_training
)
self
.
assertEqual
(
box_predictor
.
_box_code_size
,
4
)
self
.
assertTrue
(
box_predictor
.
_predict_instance_masks
)
self
.
assertEqual
(
box_predictor
.
_mask_prediction_conv_depth
,
512
)
self
.
assertFalse
(
box_predictor
.
_predict_keypoints
)
class
RfcnBoxPredictorBuilderTest
(
tf
.
test
.
TestCase
):
def
test_box_predictor_calls_fc_argscope_fn
(
      self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
    self.assertAlmostEqual((hyperparams_proto.regularizer.
                            l1_regularizer.weight),
                           (conv_hyperparams_actual.regularizer.
                            l1_regularizer.weight))
    self.assertAlmostEqual((hyperparams_proto.initializer.
                            truncated_normal_initializer.stddev),
                           (conv_hyperparams_actual.initializer.
                            truncated_normal_initializer.stddev))
    self.assertAlmostEqual((hyperparams_proto.initializer.
                            truncated_normal_initializer.mean),
                           (conv_hyperparams_actual.initializer.
                            truncated_normal_initializer.mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_non_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    box_predictor_text_proto = """
      rfcn_box_predictor {
        num_spatial_bins_height: 4
        num_spatial_bins_width: 4
        depth: 4
        box_code_size: 3
        crop_height: 16
        crop_width: 16
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 3)
    self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
    self.assertEqual(box_predictor._crop_size, [16, 16])

  def test_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
    self.assertEqual(box_predictor._crop_size, [12, 12])


if __name__ == '__main__':
  tf.test.main()
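The mock arg-scope builder in the tests above only echoes its arguments back so the test can inspect what box_predictor_builder.build forwards. Outside of tests the same call is normally wired up with hyperparams_builder.build as the argscope_fn; a minimal sketch, with illustrative proto values rather than values taken from any shipped config:

# Minimal sketch: building an R-FCN box predictor outside of a test.
# The proto values below are illustrative placeholders.
from google.protobuf import text_format

from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2

box_predictor_text_proto = """
  rfcn_box_predictor {
    conv_hyperparams {
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.01 } }
      activation: RELU_6
    }
  }
"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)

# hyperparams_builder.build plays the role the mock argscope_fn plays above:
# it turns the conv_hyperparams proto into a tf-slim arg_scope.
rfcn_box_predictor = box_predictor_builder.build(
    argscope_fn=hyperparams_builder.build,
    box_predictor_config=box_predictor_proto,
    is_training=True,
    num_classes=90)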
object_detection/builders/hyperparams_builder.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow as tf

from object_detection.protos import hyperparams_pb2

slim = tf.contrib.slim


def build(hyperparams_config, is_training):
  """Builds tf-slim arg_scope for convolution ops based on the config.

  Returns an arg_scope to use for convolution ops containing weights
  initializer, weights regularizer, activation function, batch norm function
  and batch norm parameters based on the configuration.

  Note that if the batch_norm parameters are not specified in the config
  (i.e. left to default) then batch norm is excluded from the arg_scope.

  The batch norm parameters are set for updates based on `is_training` argument
  and conv_hyperparams_config.batch_norm.train parameter. During training, they
  are updated only if batch_norm.train parameter is true. However, during eval,
  no updates are made to the batch norm variables. In both cases, their current
  values are used during forward pass.

  Args:
    hyperparams_config: hyperparams.proto object containing
      hyperparameters.
    is_training: Whether the network is in training mode.

  Returns:
    arg_scope: tf-slim arg_scope containing hyperparameters for ops.

  Raises:
    ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
  """
  if not isinstance(hyperparams_config,
                    hyperparams_pb2.Hyperparams):
    raise ValueError('hyperparams_config not of type '
                     'hyperparams_pb.Hyperparams.')

  batch_norm = None
  batch_norm_params = None
  if hyperparams_config.HasField('batch_norm'):
    batch_norm = slim.batch_norm
    batch_norm_params = _build_batch_norm_params(
        hyperparams_config.batch_norm, is_training)

  affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
  if hyperparams_config.HasField('op') and (
      hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
    affected_ops = [slim.fully_connected]
  with slim.arg_scope(
      affected_ops,
      weights_regularizer=_build_regularizer(hyperparams_config.regularizer),
      weights_initializer=_build_initializer(hyperparams_config.initializer),
      activation_fn=_build_activation_fn(hyperparams_config.activation),
      normalizer_fn=batch_norm,
      normalizer_params=batch_norm_params) as sc:
    return sc


def _build_activation_fn(activation_fn):
  """Builds a callable activation from config.

  Args:
    activation_fn: hyperparams_pb2.Hyperparams.activation

  Returns:
    Callable activation function.

  Raises:
    ValueError: On unknown activation function.
  """
  if activation_fn == hyperparams_pb2.Hyperparams.NONE:
    return None
  if activation_fn == hyperparams_pb2.Hyperparams.RELU:
    return tf.nn.relu
  if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
    return tf.nn.relu6
  raise ValueError('Unknown activation function: {}'.format(activation_fn))


def _build_regularizer(regularizer):
  """Builds a tf-slim regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    tf-slim regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
  if regularizer_oneof == 'l1_regularizer':
    return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
  if regularizer_oneof == 'l2_regularizer':
    return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
  raise ValueError('Unknown regularizer function: {}'.format(
      regularizer_oneof))


def _build_initializer(initializer):
  """Build a tf initializer from config.

  Args:
    initializer: hyperparams_pb2.Hyperparams.initializer proto.

  Returns:
    tf initializer.

  Raises:
    ValueError: On unknown initializer.
  """
  initializer_oneof = initializer.WhichOneof('initializer_oneof')
  if initializer_oneof == 'truncated_normal_initializer':
    return tf.truncated_normal_initializer(
        mean=initializer.truncated_normal_initializer.mean,
        stddev=initializer.truncated_normal_initializer.stddev)
  if initializer_oneof == 'variance_scaling_initializer':
    enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
                       DESCRIPTOR.enum_types_by_name['Mode'])
    mode = enum_descriptor.values_by_number[initializer.
                                            variance_scaling_initializer.
                                            mode].name
    return slim.variance_scaling_initializer(
        factor=initializer.variance_scaling_initializer.factor,
        mode=mode,
        uniform=initializer.variance_scaling_initializer.uniform)
  raise ValueError('Unknown initializer function: {}'.format(
      initializer_oneof))


def _build_batch_norm_params(batch_norm, is_training):
  """Build a dictionary of batch_norm params from config.

  Args:
    batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.

  Returns:
    A dictionary containing batch_norm parameters.
  """
  batch_norm_params = {
      'decay': batch_norm.decay,
      'center': batch_norm.center,
      'scale': batch_norm.scale,
      'epsilon': batch_norm.epsilon,
      'fused': True,
      'is_training': is_training and batch_norm.train,
  }
  return batch_norm_params
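For reference, a minimal usage sketch (not part of this file): the returned arg_scope is entered with slim.arg_scope, so convolution ops created inside it pick up the configured regularizer, initializer, activation and, when the batch_norm field is set, batch normalization. The proto values below are illustrative placeholders.

# Minimal usage sketch; proto values are illustrative placeholders.
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

slim = tf.contrib.slim

hyperparams_text_proto = """
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 } }
  activation: RELU_6
  batch_norm { decay: 0.997 epsilon: 0.001 train: true }
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(hyperparams_text_proto, hyperparams_proto)

# Build the arg_scope once, then create ops inside it: the conv below inherits
# the regularizer, initializer, RELU_6 activation and batch norm parameters.
scope = hyperparams_builder.build(hyperparams_proto, is_training=True)
images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
with slim.arg_scope(scope):
  net = slim.conv2d(images, 64, [3, 3], scope='example_conv')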
object_detection/builders/hyperparams_builder_test.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import numpy as np
import tensorflow as tf

from google.protobuf import text_format

# TODO: Rewrite third_party imports.
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

slim = tf.contrib.slim


class HyperparamsBuilderTest(tf.test.TestCase):

  # TODO: Make this a public api in slim arg_scope.py.
  def _get_scope_key(self, op):
    return getattr(op, '_key_op', str(op))

  def test_default_arg_scope_has_conv2d_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.conv2d) in scope)

  def test_default_arg_scope_has_separable_conv2d_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.separable_conv2d) in scope)

  def test_default_arg_scope_has_conv2d_transpose_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.conv2d_transpose) in scope)

  def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
    conv_hyperparams_text_proto = """
      op: FC
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(slim.fully_connected) in scope)

  def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(
      self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    kwargs_1, kwargs_2, kwargs_3 = scope.values()
    self.assertDictEqual(kwargs_1, kwargs_2)
    self.assertDictEqual(kwargs_1, kwargs_3)

  def test_return_l1_regularized_weights(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.5
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    regularizer = conv_scope_arguments['weights_regularizer']
    weights = np.array([1., -1, 4., 2.])
    with self.test_session() as sess:
      result = sess.run(regularizer(tf.constant(weights)))
      self.assertAllClose(np.abs(weights).sum() * 0.5, result)

  def test_return_l2_regularizer_weights(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
          weight: 0.42
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    regularizer = conv_scope_arguments['weights_regularizer']
    weights = np.array([1., -1, 4., 2.])
    with self.test_session() as sess:
      result = sess.run(regularizer(tf.constant(weights)))
      self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)

  def test_return_non_default_batch_norm_params_with_train_during_train(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      batch_norm {
        decay: 0.7
        center: false
        scale: true
        epsilon: 0.03
        train: true
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
    batch_norm_params = conv_scope_arguments['normalizer_params']
    self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
    self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
    self.assertFalse(batch_norm_params['center'])
    self.assertTrue(batch_norm_params['scale'])
    self.assertTrue(batch_norm_params['is_training'])

  def test_return_batch_norm_params_with_notrain_during_eval(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      batch_norm {
        decay: 0.7
        center: false
        scale: true
        epsilon: 0.03
        train: true
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=False)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
    batch_norm_params = conv_scope_arguments['normalizer_params']
    self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
    self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
    self.assertFalse(batch_norm_params['center'])
    self.assertTrue(batch_norm_params['scale'])
    self.assertFalse(batch_norm_params['is_training'])

  def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      batch_norm {
        decay: 0.7
        center: false
        scale: true
        epsilon: 0.03
        train: false
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
    batch_norm_params = conv_scope_arguments['normalizer_params']
    self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
    self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
    self.assertFalse(batch_norm_params['center'])
    self.assertTrue(batch_norm_params['scale'])
    self.assertFalse(batch_norm_params['is_training'])

  def test_do_not_use_batch_norm_if_default(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['normalizer_fn'], None)
    self.assertEqual(conv_scope_arguments['normalizer_params'], None)

  def test_use_none_activation(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: NONE
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['activation_fn'], None)

  def test_use_relu_activation(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)

  def test_use_relu_6_activation(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)

  def _assert_variance_in_range(self, initializer, shape, variance,
                                tol=1e-2):
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        var = tf.get_variable(
            name='test',
            shape=shape,
            dtype=tf.float32,
            initializer=initializer)
        sess.run(tf.global_variables_initializer())
        values = sess.run(var)
        self.assertAllClose(np.var(values), variance, tol, tol)

  def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_IN
          uniform: false
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=2. / 100.)

  def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_OUT
          uniform: false
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=2. / 40.)

  def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_AVG
          uniform: false
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=4. / (100. + 40.))

  def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        variance_scaling_initializer {
          factor: 2.0
          mode: FAN_IN
          uniform: true
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=2. / 100.)

  def test_variance_in_range_with_truncated_normal_initializer(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.8
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    conv_scope_arguments = scope.values()[0]
    initializer = conv_scope_arguments['weights_initializer']
    self._assert_variance_in_range(initializer, shape=[100, 40],
                                   variance=0.49, tol=1e-1)


if __name__ == '__main__':
  tf.test.main()
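The expected variances in the initializer tests above follow from how the variance scaling initializer chooses its scale: the sampling variance is factor / n, where n is fan_in, fan_out, or their average depending on the mode. A quick arithmetic sketch for the [100, 40] weight shape used above:

# Sanity-check sketch for the expected variances used in the tests above.
# A 2-D weight of shape [100, 40] has fan_in = 100 and fan_out = 40.
factor, fan_in, fan_out = 2.0, 100., 40.
expected_variances = {
    'FAN_IN': factor / fan_in,                      # 0.02, i.e. variance=2./100.
    'FAN_OUT': factor / fan_out,                    # 0.05, i.e. variance=2./40.
    'FAN_AVG': factor / ((fan_in + fan_out) / 2.),  # ~0.0286, i.e. 4./(100.+40.)
}
print(expected_variances)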
object_detection/builders/image_resizer_builder.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2


def build(image_resizer_config):
  """Builds callable for image resizing operations.

  Args:
    image_resizer_config: image_resizer.proto object containing parameters for
      an image resizing operation.

  Returns:
    image_resizer_fn: Callable for image resizing. This callable always takes
      a rank-3 image tensor (corresponding to a single image) and returns a
      rank-3 image tensor, possibly with new spatial dimensions.

  Raises:
    ValueError: if `image_resizer_config` is of incorrect type.
    ValueError: if `image_resizer_config.image_resizer_oneof` is of unexpected
      type.
    ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
      is used.
  """
  if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
    raise ValueError('image_resizer_config not of type '
                     'image_resizer_pb2.ImageResizer.')

  if image_resizer_config.WhichOneof(
      'image_resizer_oneof') == 'keep_aspect_ratio_resizer':
    keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
    if not (keep_aspect_ratio_config.min_dimension
            <= keep_aspect_ratio_config.max_dimension):
      raise ValueError('min_dimension > max_dimension')
    return functools.partial(
        preprocessor.resize_to_range,
        min_dimension=keep_aspect_ratio_config.min_dimension,
        max_dimension=keep_aspect_ratio_config.max_dimension)
  if image_resizer_config.WhichOneof(
      'image_resizer_oneof') == 'fixed_shape_resizer':
    fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
    return functools.partial(preprocessor.resize_image,
                             new_height=fixed_shape_resizer_config.height,
                             new_width=fixed_shape_resizer_config.width)
  raise ValueError('Invalid image resizer option.')
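A minimal usage sketch (not part of this file): the returned callable maps a single rank-3 image tensor to a resized rank-3 image tensor. The dimensions below are illustrative placeholders.

# Minimal usage sketch; the min/max dimensions are illustrative placeholders.
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2

image_resizer_text_proto = """
  keep_aspect_ratio_resizer {
    min_dimension: 600
    max_dimension: 1024
  }
"""
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(image_resizer_text_proto, image_resizer_config)

# The builder returns a functools.partial over preprocessor.resize_to_range;
# applying it to a [height, width, 3] image yields a resized [h', w', 3] image.
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
image = tf.placeholder(tf.float32, shape=[None, None, 3])
resized_image = image_resizer_fn(image)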
object_detection/builders/image_resizer_builder_test.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.builders.image_resizer_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2


class ImageResizerBuilderTest(tf.test.TestCase):

  def _shape_of_resized_random_image_given_text_proto(
      self, input_shape, text_proto):
    image_resizer_config = image_resizer_pb2.ImageResizer()
    text_format.Merge(text_proto, image_resizer_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
    images = tf.to_float(tf.random_uniform(
        input_shape, minval=0, maxval=255, dtype=tf.int32))
    resized_images = image_resizer_fn(images)
    with self.test_session() as sess:
      return sess.run(resized_images).shape

  def test_built_keep_aspect_ratio_resizer_returns_expected_shape(self):
    image_resizer_text_proto = """
      keep_aspect_ratio_resizer {
        min_dimension: 10
        max_dimension: 20
      }
    """
    input_shape = (50, 25, 3)
    expected_output_shape = (20, 10, 3)
    output_shape = self._shape_of_resized_random_image_given_text_proto(
        input_shape, image_resizer_text_proto)
    self.assertEqual(output_shape, expected_output_shape)

  def test_built_fixed_shape_resizer_returns_expected_shape(self):
    image_resizer_text_proto = """
      fixed_shape_resizer {
        height: 10
        width: 20
      }
    """
    input_shape = (50, 25, 3)
    expected_output_shape = (10, 20, 3)
    output_shape = self._shape_of_resized_random_image_given_text_proto(
        input_shape, image_resizer_text_proto)
    self.assertEqual(output_shape, expected_output_shape)

  def test_raises_error_on_invalid_input(self):
    invalid_input = 'invalid_input'
    with self.assertRaises(ValueError):
      image_resizer_builder.build(invalid_input)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/input_reader_builder.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input reader builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wishes to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import
tensorflow
as
tf
from
object_detection.data_decoders
import
tf_example_decoder
from
object_detection.protos
import
input_reader_pb2
parallel_reader
=
tf
.
contrib
.
slim
.
parallel_reader
def
build
(
input_reader_config
):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
"""
if
not
isinstance
(
input_reader_config
,
input_reader_pb2
.
InputReader
):
raise
ValueError
(
'input_reader_config not of type '
'input_reader_pb2.InputReader.'
)
if
input_reader_config
.
WhichOneof
(
'input_reader'
)
==
'tf_record_input_reader'
:
config
=
input_reader_config
.
tf_record_input_reader
_
,
string_tensor
=
parallel_reader
.
parallel_read
(
config
.
input_path
,
reader_class
=
tf
.
TFRecordReader
,
num_epochs
=
(
input_reader_config
.
num_epochs
if
input_reader_config
.
num_epochs
else
None
),
num_readers
=
input_reader_config
.
num_readers
,
shuffle
=
input_reader_config
.
shuffle
,
dtypes
=
[
tf
.
string
,
tf
.
string
],
capacity
=
input_reader_config
.
queue_capacity
,
min_after_dequeue
=
input_reader_config
.
min_after_dequeue
)
return
tf_example_decoder
.
TfExampleDecoder
().
decode
(
string_tensor
)
raise
ValueError
(
'Unsupported input_reader_config.'
)
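A minimal usage sketch (not part of this file); the TFRecord path is a placeholder. The returned dictionary is keyed by standard_fields.InputDataFields, as exercised by the test file that follows.

# Minimal usage sketch; 'path/to/examples.record' is a placeholder path.
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.protos import input_reader_pb2

input_reader_text_proto = """
  shuffle: true
  num_readers: 2
  tf_record_input_reader {
    input_path: 'path/to/examples.record'
  }
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)

# tensor_dict maps InputDataFields keys (image, groundtruth_boxes,
# groundtruth_classes, ...) to tensors fed by TF queue runners.
tensor_dict = input_reader_builder.build(input_reader_proto)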
object_detection/builders/input_reader_builder_test.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_reader_builder."""
import os
import numpy as np
import tensorflow as tf

from google.protobuf import text_format

from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from object_detection.builders import input_reader_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2


class InputReaderBuilderTest(tf.test.TestCase):

  def create_tf_record(self):
    path = os.path.join(self.get_temp_dir(), 'tfrecord')
    writer = tf.python_io.TFRecordWriter(path)

    image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    with self.test_session():
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
    example = example_pb2.Example(features=feature_pb2.Features(feature={
        'image/encoded': feature_pb2.Feature(
            bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])),
        'image/format': feature_pb2.Feature(
            bytes_list=feature_pb2.BytesList(value=['jpeg'.encode('utf-8')])),
        'image/object/bbox/xmin': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/xmax': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/bbox/ymin': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/ymax': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/class/label': feature_pb2.Feature(
            int64_list=feature_pb2.Int64List(value=[2])),
    }))
    writer.write(example.SerializeToString())
    writer.close()

    return path

  def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = input_reader_builder.build(input_reader_proto)

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertEquals(
        (4, 5, 3), output_dict[fields.InputDataFields.image].shape)
    self.assertEquals(
        [2], output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEquals(
        (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0])


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/losses_builder.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build localization and classification losses from config."""
from object_detection.core import losses
from object_detection.protos import losses_pb2


def build(loss_config):
  """Build losses based on the config.

  Builds classification, localization losses and optionally a hard example
  miner based on the config.

  Args:
    loss_config: A losses_pb2.Loss object.

  Returns:
    classification_loss: Classification loss object.
    localization_loss: Localization loss object.
    classification_weight: Classification loss weight.
    localization_weight: Localization loss weight.
    hard_example_miner: Hard example miner object.
  """
  classification_loss = _build_classification_loss(
      loss_config.classification_loss)
  localization_loss = _build_localization_loss(
      loss_config.localization_loss)
  classification_weight = loss_config.classification_weight
  localization_weight = loss_config.localization_weight
  hard_example_miner = None
  if loss_config.HasField('hard_example_miner'):
    hard_example_miner = build_hard_example_miner(
        loss_config.hard_example_miner,
        classification_weight,
        localization_weight)
  return (classification_loss, localization_loss,
          classification_weight,
          localization_weight, hard_example_miner)


def build_hard_example_miner(config,
                             classification_weight,
                             localization_weight):
  """Builds hard example miner based on the config.

  Args:
    config: A losses_pb2.HardExampleMiner object.
    classification_weight: Classification loss weight.
    localization_weight: Localization loss weight.

  Returns:
    Hard example miner.
  """
  loss_type = None
  if config.loss_type == losses_pb2.HardExampleMiner.BOTH:
    loss_type = 'both'
  if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION:
    loss_type = 'cls'
  if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION:
    loss_type = 'loc'

  max_negatives_per_positive = None
  num_hard_examples = None
  if config.max_negatives_per_positive > 0:
    max_negatives_per_positive = config.max_negatives_per_positive
  if config.num_hard_examples > 0:
    num_hard_examples = config.num_hard_examples
  hard_example_miner = losses.HardExampleMiner(
      num_hard_examples=num_hard_examples,
      iou_threshold=config.iou_threshold,
      loss_type=loss_type,
      cls_loss_weight=classification_weight,
      loc_loss_weight=localization_weight,
      max_negatives_per_positive=max_negatives_per_positive,
      min_negatives_per_image=config.min_negatives_per_image)
  return hard_example_miner


def _build_localization_loss(loss_config):
  """Builds a localization loss based on the loss config.

  Args:
    loss_config: A losses_pb2.LocalizationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.LocalizationLoss):
    raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')

  loss_type = loss_config.WhichOneof('localization_loss')

  if loss_type == 'weighted_l2':
    config = loss_config.weighted_l2
    return losses.WeightedL2LocalizationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'weighted_smooth_l1':
    config = loss_config.weighted_smooth_l1
    return losses.WeightedSmoothL1LocalizationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'weighted_iou':
    return losses.WeightedIOULocalizationLoss()

  raise ValueError('Empty loss config.')


def _build_classification_loss(loss_config):
  """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')

  loss_type = loss_config.WhichOneof('classification_loss')

  if loss_type == 'weighted_sigmoid':
    config = loss_config.weighted_sigmoid
    return losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'weighted_softmax':
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        anchorwise_output=config.anchorwise_output)

  if loss_type == 'bootstrapped_sigmoid':
    config = loss_config.bootstrapped_sigmoid
    return losses.BootstrappedSigmoidClassificationLoss(
        alpha=config.alpha,
        bootstrap_type=('hard' if config.hard_bootstrap else 'soft'),
        anchorwise_output=config.anchorwise_output)

  raise ValueError('Empty loss config.')
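A minimal usage sketch (not part of this file): build returns a 5-tuple, so callers typically unpack it in a single statement. The weights and miner settings below are illustrative.

# Minimal usage sketch; the weights and miner settings are illustrative.
from google.protobuf import text_format
from object_detection.builders import losses_builder
from object_detection.protos import losses_pb2

losses_text_proto = """
  localization_loss { weighted_smooth_l1 { } }
  classification_loss { weighted_softmax { } }
  hard_example_miner {
    num_hard_examples: 64
    loss_type: CLASSIFICATION
  }
  classification_weight: 1.0
  localization_weight: 1.0
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)

# build returns (classification_loss, localization_loss, classification_weight,
# localization_weight, hard_example_miner); the miner is None when unset.
(classification_loss, localization_loss, classification_weight,
 localization_weight, hard_example_miner) = losses_builder.build(losses_proto)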
object_detection/builders/losses_builder_test.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses_builder."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import losses_builder
from object_detection.core import losses
from object_detection.protos import losses_pb2


class LocalizationLossBuilderTest(tf.test.TestCase):

  def test_build_weighted_l2_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedL2LocalizationLoss))

  def test_build_weighted_smooth_l1_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_smooth_l1 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedSmoothL1LocalizationLoss))

  def test_build_weighted_iou_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_iou {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedIOULocalizationLoss))

  def test_anchorwise_output(self):
    losses_text_proto = """
      localization_loss {
        weighted_smooth_l1 {
          anchorwise_output: true
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedSmoothL1LocalizationLoss))
    predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
    targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
    weights = tf.constant([[1.0, 1.0]])
    loss = localization_loss(predictions, targets, weights=weights)
    self.assertEqual(loss.shape, [1, 2])

  def test_raise_error_on_empty_localization_config(self):
    losses_text_proto = """
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    with self.assertRaises(ValueError):
      losses_builder._build_localization_loss(losses_proto)


class ClassificationLossBuilderTest(tf.test.TestCase):

  def test_build_weighted_sigmoid_classification_loss(self):
    losses_text_proto = """
      classification_loss {
        weighted_sigmoid {
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSigmoidClassificationLoss))

  def test_build_weighted_softmax_classification_loss(self):
    losses_text_proto = """
      classification_loss {
        weighted_softmax {
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSoftmaxClassificationLoss))

  def test_build_bootstrapped_sigmoid_classification_loss(self):
    losses_text_proto = """
      classification_loss {
        bootstrapped_sigmoid {
          alpha: 0.5
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.BootstrappedSigmoidClassificationLoss))

  def test_anchorwise_output(self):
    losses_text_proto = """
      classification_loss {
        weighted_sigmoid {
          anchorwise_output: true
        }
      }
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSigmoidClassificationLoss))
    predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])
    targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
    weights = tf.constant([[1.0, 1.0]])
    loss = classification_loss(predictions, targets, weights=weights)
    self.assertEqual(loss.shape, [1, 2])

  def test_raise_error_on_empty_config(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    with self.assertRaises(ValueError):
      losses_builder.build(losses_proto)


class HardExampleMinerBuilderTest(tf.test.TestCase):

  def test_do_not_build_hard_example_miner_by_default(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertEqual(hard_example_miner, None)

  def test_build_hard_example_miner_for_classification_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
        loss_type: CLASSIFICATION
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertEqual(hard_example_miner._loss_type, 'cls')

  def test_build_hard_example_miner_for_localization_loss(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
        loss_type: LOCALIZATION
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertEqual(hard_example_miner._loss_type, 'loc')

  def test_build_hard_example_miner_with_non_default_values(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
        num_hard_examples: 32
        iou_threshold: 0.5
        loss_type: LOCALIZATION
        max_negatives_per_positive: 10
        min_negatives_per_image: 3
      }
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertEqual(hard_example_miner._num_hard_examples, 32)
    self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)
    self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)
    self.assertEqual(hard_example_miner._min_negatives_per_image, 3)


class LossBuilderTest(tf.test.TestCase):

  def test_build_all_loss_parameters(self):
    losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
      }
      classification_weight: 0.8
      localization_weight: 0.2
    """
    losses_proto = losses_pb2.Loss()
    text_format.Merge(losses_text_proto, losses_proto)
    (classification_loss, localization_loss,
     classification_weight, localization_weight,
     hard_example_miner) = losses_builder.build(losses_proto)
    self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
    self.assertTrue(isinstance(classification_loss,
                               losses.WeightedSoftmaxClassificationLoss))
    self.assertTrue(isinstance(localization_loss,
                               losses.WeightedL2LocalizationLoss))
    self.assertAlmostEqual(classification_weight, 0.8)
    self.assertAlmostEqual(localization_weight, 0.2)


if __name__ == '__main__':
  tf.test.main()
object_detection/builders/matcher_builder.py
0 → 100644
View file @
f282f6ef
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection matcher from configuration."""
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.protos import matcher_pb2


def build(matcher_config):
  """Builds a matcher object based on the matcher config.

  Args:
    matcher_config: A matcher.proto object containing the config for the
      desired Matcher.

  Returns:
    Matcher based on the config.

  Raises:
    ValueError: On empty matcher proto.
  """
  if not isinstance(matcher_config, matcher_pb2.Matcher):
    raise ValueError('matcher_config not of type matcher_pb2.Matcher.')
  if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher':
    matcher = matcher_config.argmax_matcher
    matched_threshold = unmatched_threshold = None
    if not matcher.ignore_thresholds:
      matched_threshold = matcher.matched_threshold
      unmatched_threshold = matcher.unmatched_threshold
    return argmax_matcher.ArgMaxMatcher(
        matched_threshold=matched_threshold,
        unmatched_threshold=unmatched_threshold,
        negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched,
        force_match_for_each_row=matcher.force_match_for_each_row)
  if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher':
    return bipartite_matcher.GreedyBipartiteMatcher()
  raise ValueError('Empty matcher.')
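A minimal usage sketch (not part of this file); the threshold values are illustrative placeholders.

# Minimal usage sketch; the thresholds are illustrative placeholders.
from google.protobuf import text_format
from object_detection.builders import matcher_builder
from object_detection.protos import matcher_pb2

matcher_text_proto = """
  argmax_matcher {
    matched_threshold: 0.7
    unmatched_threshold: 0.3
  }
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)

# Returns an argmax_matcher.ArgMaxMatcher configured with the thresholds above;
# a `bipartite_matcher { }` block would yield a GreedyBipartiteMatcher instead.
matcher = matcher_builder.build(matcher_proto)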