Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
83a9a239
Unverified
Commit
83a9a239
authored
Aug 10, 2018
by
Taylor Robie
Committed by
GitHub
Aug 10, 2018
Browse files
remove fork of object_detection created for MLPerf. (#4840)
parent
ad3526a9
Changes
172
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
0 additions
and
3219 deletions
+0
-3219
research/mlperf_object_detection/Mask_RCNN/object_detection/box_coders/mean_stddev_box_coder_test.py
...object_detection/box_coders/mean_stddev_box_coder_test.py
+0
-54
research/mlperf_object_detection/Mask_RCNN/object_detection/box_coders/square_box_coder.py
...Mask_RCNN/object_detection/box_coders/square_box_coder.py
+0
-126
research/mlperf_object_detection/Mask_RCNN/object_detection/box_coders/square_box_coder_test.py
...RCNN/object_detection/box_coders/square_box_coder_test.py
+0
-97
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/__init__.py
...detection/Mask_RCNN/object_detection/builders/__init__.py
+0
-0
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/anchor_generator_builder.py
...CNN/object_detection/builders/anchor_generator_builder.py
+0
-94
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/anchor_generator_builder_test.py
...bject_detection/builders/anchor_generator_builder_test.py
+0
-300
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_coder_builder.py
.../Mask_RCNN/object_detection/builders/box_coder_builder.py
+0
-66
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_coder_builder_test.py
..._RCNN/object_detection/builders/box_coder_builder_test.py
+0
-136
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_predictor_builder.py
...k_RCNN/object_detection/builders/box_predictor_builder.py
+0
-138
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_predictor_builder_test.py
...N/object_detection/builders/box_predictor_builder_test.py
+0
-514
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/dataset_builder.py
...on/Mask_RCNN/object_detection/builders/dataset_builder.py
+0
-196
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/dataset_builder_test.py
...sk_RCNN/object_detection/builders/dataset_builder_test.py
+0
-260
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/graph_rewriter_builder.py
..._RCNN/object_detection/builders/graph_rewriter_builder.py
+0
-42
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/graph_rewriter_builder_test.py
.../object_detection/builders/graph_rewriter_builder_test.py
+0
-57
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/hyperparams_builder.py
...ask_RCNN/object_detection/builders/hyperparams_builder.py
+0
-182
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/hyperparams_builder_test.py
...CNN/object_detection/builders/hyperparams_builder_test.py
+0
-509
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/image_resizer_builder.py
...k_RCNN/object_detection/builders/image_resizer_builder.py
+0
-115
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/image_resizer_builder_test.py
...N/object_detection/builders/image_resizer_builder_test.py
+0
-113
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/input_reader_builder.py
...sk_RCNN/object_detection/builders/input_reader_builder.py
+0
-76
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/input_reader_builder_test.py
...NN/object_detection/builders/input_reader_builder_test.py
+0
-144
No files found.
Too many changes to show.
To preserve performance only
172 of 172+
files are displayed.
Plain diff
Email patch
research/mlperf_object_detection/Mask_RCNN/object_detection/box_coders/mean_stddev_box_coder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.mean_stddev_boxcoder."""
import
tensorflow
as
tf
from
object_detection.box_coders
import
mean_stddev_box_coder
from
object_detection.core
import
box_list
class MeanStddevBoxCoderTest(tf.test.TestCase):
  """Checks MeanStddevBoxCoder encode/decode against precomputed codes."""

  def testGetCorrectRelativeCodesAfterEncoding(self):
    # Two boxes encoded against two anchors; the second anchor differs from
    # its box, producing nonzero codes at stddev=0.1.
    corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
    to_encode = box_list.BoxList(tf.constant(corners))
    anchors = box_list.BoxList(
        tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]]))
    expected = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]]
    encoded = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1).encode(
        to_encode, anchors)
    with self.test_session() as sess:
      self.assertAllClose(sess.run(encoded), expected)

  def testGetCorrectBoxesAfterDecoding(self):
    # Decoding the codes from the encoding test must reproduce the boxes.
    codes = tf.constant([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]])
    anchors = box_list.BoxList(
        tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]]))
    expected = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
    decoded = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1).decode(
        codes, anchors)
    with self.test_session() as sess:
      self.assertAllClose(sess.run(decoded.get()), expected)
if __name__ == '__main__':
  # Discovers and runs all TestCase methods in this module via TensorFlow's
  # test runner.
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/box_coders/square_box_coder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Square box coder.
Square box coder follows the coding schema described below:
l = sqrt(h * w)
la = sqrt(ha * wa)
ty = (y - ya) / la
tx = (x - xa) / la
tl = log(l / la)
where x, y, w, h denote the box's center coordinates, width, and height,
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tl denote the anchor-encoded
center, and length, respectively. Because the encoded box is a square, only
one length is encoded.
This has shown to provide performance improvements over the Faster RCNN box
coder when the objects being detected tend to be square (e.g. faces) and when
the input images are not distorted via resizing.
"""
import
tensorflow
as
tf
from
object_detection.core
import
box_coder
from
object_detection.core
import
box_list
# Small constant added to square side lengths in SquareBoxCoder._encode so
# the division and log below never see an exact zero (avoids NaN for
# zero-area boxes or anchors).
EPSILON = 1e-8
class SquareBoxCoder(box_coder.BoxCoder):
  """Encodes a 3-scalar representation of a square box.

  A box is represented relative to an anchor by [ty, tx, tl]: the center
  offsets normalized by the anchor's square side length, and the log ratio
  of square side lengths.
  """

  def __init__(self, scale_factors=None):
    """Constructor for SquareBoxCoder.

    Args:
      scale_factors: List of 3 positive scalars to scale ty, tx, and tl.
        If set to None, does not perform scaling. For faster RCNN,
        the open-source implementation recommends using [10.0, 10.0, 5.0].

    Raises:
      ValueError: If scale_factors is not length 3 or contains values less
        than or equal to 0.
    """
    if scale_factors:
      if len(scale_factors) != 3:
        raise ValueError('The argument scale_factors must be a list of length '
                         '3.')
      if any(scalar <= 0 for scalar in scale_factors):
        raise ValueError('The values in scale_factors must all be greater '
                         'than 0.')
    self._scale_factors = scale_factors

  @property
  def code_size(self):
    # Each encoded box is the triple [ty, tx, tl].
    return 3

  def _encode(self, boxes, anchors):
    """Encodes a box collection with respect to an anchor collection.

    Args:
      boxes: BoxList holding N boxes to be encoded.
      anchors: BoxList of anchors.

    Returns:
      a tensor representing N anchor-encoded boxes of the format
      [ty, tx, tl].
    """
    anchor_y, anchor_x, anchor_h, anchor_w = (
        anchors.get_center_coordinates_and_sizes())
    box_y, box_x, box_h, box_w = boxes.get_center_coordinates_and_sizes()
    # Square side lengths; EPSILON keeps the division and log below away
    # from zero for degenerate boxes/anchors.
    anchor_side = tf.sqrt(anchor_h * anchor_w) + EPSILON
    box_side = tf.sqrt(box_h * box_w) + EPSILON
    ty = (box_y - anchor_y) / anchor_side
    tx = (box_x - anchor_x) / anchor_side
    tl = tf.log(box_side / anchor_side)
    # Scales location targets for joint training.
    if self._scale_factors:
      ty *= self._scale_factors[0]
      tx *= self._scale_factors[1]
      tl *= self._scale_factors[2]
    return tf.transpose(tf.stack([ty, tx, tl]))

  def _decode(self, rel_codes, anchors):
    """Decodes relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    anchor_y, anchor_x, anchor_h, anchor_w = (
        anchors.get_center_coordinates_and_sizes())
    anchor_side = tf.sqrt(anchor_h * anchor_w)
    ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
    # Undo the scaling applied in _encode.
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      tl /= self._scale_factors[2]
    side = tf.exp(tl) * anchor_side
    ycenter = ty * anchor_side + anchor_y
    xcenter = tx * anchor_side + anchor_x
    half = side / 2.
    ymin = ycenter - half
    xmin = xcenter - half
    ymax = ycenter + half
    xmax = xcenter + half
    return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
research/mlperf_object_detection/Mask_RCNN/object_detection/box_coders/square_box_coder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.square_box_coder."""
import
tensorflow
as
tf
from
object_detection.box_coders
import
square_box_coder
from
object_detection.core
import
box_list
class SquareBoxCoderTest(tf.test.TestCase):
  """Checks SquareBoxCoder encoding and decoding against known values."""

  def _encode_and_eval(self, box_corners, anchor_corners, scale_factors):
    # Wraps corner lists in BoxLists, encodes, and returns evaluated codes.
    boxes = box_list.BoxList(tf.constant(box_corners))
    anchors = box_list.BoxList(tf.constant(anchor_corners))
    coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
    rel_codes = coder.encode(boxes, anchors)
    with self.test_session() as sess:
      return sess.run(rel_codes)

  def _decode_and_eval(self, rel_codes, anchor_corners, scale_factors):
    # Decodes relative codes against anchors and returns evaluated corners.
    anchors = box_list.BoxList(tf.constant(anchor_corners))
    coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
    decoded = coder.decode(rel_codes, anchors)
    with self.test_session() as sess:
      return sess.run(decoded.get())

  def test_correct_relative_codes_with_default_scale(self):
    rel_codes_out = self._encode_and_eval(
        [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
        [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
        scale_factors=None)
    self.assertAllClose(rel_codes_out,
                        [[-0.790569, -0.263523, -0.293893],
                         [-0.068041, -0.272166, -0.89588]])

  def test_correct_relative_codes_with_non_default_scale(self):
    rel_codes_out = self._encode_and_eval(
        [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
        [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
        scale_factors=[2, 3, 4])
    self.assertAllClose(rel_codes_out,
                        [[-1.581139, -0.790569, -1.175573],
                         [-0.136083, -0.816497, -3.583519]])

  def test_correct_relative_codes_with_small_width(self):
    # Near-zero box height exercises the EPSILON guard in the coder.
    rel_codes_out = self._encode_and_eval(
        [[10.0, 10.0, 10.0000001, 20.0]],
        [[15.0, 12.0, 30.0, 18.0]],
        scale_factors=None)
    self.assertAllClose(rel_codes_out, [[-1.317616, 0., -20.670586]])

  def test_correct_boxes_with_default_scale(self):
    boxes_out = self._decode_and_eval(
        [[-0.5, -0.416666, -0.405465], [-0.083333, -0.222222, -0.693147]],
        [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
        scale_factors=None)
    self.assertAllClose(boxes_out,
                        [[14.594306, 7.884875, 20.918861, 14.209432],
                         [0.155051, 0.102989, 0.522474, 0.470412]])

  def test_correct_boxes_with_non_default_scale(self):
    boxes_out = self._decode_and_eval(
        [[-1., -1.25, -1.62186], [-0.166667, -0.666667, -2.772588]],
        [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
        scale_factors=[2, 3, 4])
    self.assertAllClose(boxes_out,
                        [[14.594306, 7.884875, 20.918861, 14.209432],
                         [0.155051, 0.102989, 0.522474, 0.470412]])
if __name__ == '__main__':
  # Discovers and runs all TestCase methods in this module via TensorFlow's
  # test runner.
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/__init__.py
deleted
100644 → 0
View file @
ad3526a9
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/anchor_generator_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection anchor generator from config."""
from
object_detection.anchor_generators
import
grid_anchor_generator
from
object_detection.anchor_generators
import
multiple_grid_anchor_generator
from
object_detection.anchor_generators
import
multiscale_grid_anchor_generator
from
object_detection.protos
import
anchor_generator_pb2
def build(anchor_generator_config):
  """Builds an anchor generator based on the config.

  Args:
    anchor_generator_config: An anchor_generator.proto object containing the
      config for the desired anchor generator.

  Returns:
    Anchor generator based on the config.

  Raises:
    ValueError: On empty anchor generator proto.
  """
  if not isinstance(anchor_generator_config,
                    anchor_generator_pb2.AnchorGenerator):
    raise ValueError('anchor_generator_config not of type '
                     'anchor_generator_pb2.AnchorGenerator')
  if anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'grid_anchor_generator':
    grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator
    return grid_anchor_generator.GridAnchorGenerator(
        scales=[float(scale)
                for scale in grid_anchor_generator_config.scales],
        aspect_ratios=[float(aspect_ratio)
                       for aspect_ratio
                       in grid_anchor_generator_config.aspect_ratios],
        base_anchor_size=[grid_anchor_generator_config.height,
                          grid_anchor_generator_config.width],
        anchor_stride=[grid_anchor_generator_config.height_stride,
                       grid_anchor_generator_config.width_stride],
        anchor_offset=[grid_anchor_generator_config.height_offset,
                       grid_anchor_generator_config.width_offset])
  elif anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'ssd_anchor_generator':
    ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator
    anchor_strides = None
    if ssd_anchor_generator_config.height_stride:
      # BUG FIX: wrap zip() in list(). Under Python 3, zip() returns a
      # one-shot iterator, so a bare zip object would be exhausted after the
      # first traversal (and has no len()), silently dropping the configured
      # strides/offsets downstream.
      anchor_strides = list(
          zip(ssd_anchor_generator_config.height_stride,
              ssd_anchor_generator_config.width_stride))
    anchor_offsets = None
    if ssd_anchor_generator_config.height_offset:
      anchor_offsets = list(
          zip(ssd_anchor_generator_config.height_offset,
              ssd_anchor_generator_config.width_offset))
    return multiple_grid_anchor_generator.create_ssd_anchors(
        num_layers=ssd_anchor_generator_config.num_layers,
        min_scale=ssd_anchor_generator_config.min_scale,
        max_scale=ssd_anchor_generator_config.max_scale,
        scales=[float(scale)
                for scale in ssd_anchor_generator_config.scales],
        aspect_ratios=ssd_anchor_generator_config.aspect_ratios,
        interpolated_scale_aspect_ratio=(
            ssd_anchor_generator_config.interpolated_scale_aspect_ratio),
        base_anchor_size=[ssd_anchor_generator_config.base_anchor_height,
                          ssd_anchor_generator_config.base_anchor_width],
        anchor_strides=anchor_strides,
        anchor_offsets=anchor_offsets,
        reduce_boxes_in_lowest_layer=(
            ssd_anchor_generator_config.reduce_boxes_in_lowest_layer))
  elif anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'multiscale_anchor_generator':
    cfg = anchor_generator_config.multiscale_anchor_generator
    return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator(
        cfg.min_level,
        cfg.max_level,
        cfg.anchor_scale,
        [float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios],
        cfg.scales_per_octave,
        cfg.normalize_coordinates)
  else:
    raise ValueError('Empty anchor generator.')
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/anchor_generator_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generator_builder."""
import
math
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
object_detection.anchor_generators
import
grid_anchor_generator
from
object_detection.anchor_generators
import
multiple_grid_anchor_generator
from
object_detection.anchor_generators
import
multiscale_grid_anchor_generator
from
object_detection.builders
import
anchor_generator_builder
from
object_detection.protos
import
anchor_generator_pb2
class AnchorGeneratorBuilderTest(tf.test.TestCase):
  """Tests that anchor_generator_builder.build constructs the generator type
  requested by an AnchorGenerator proto with the configured parameters."""

  def assert_almost_list_equal(self, expected_list, actual_list, delta=None):
    # Element-wise approximate comparison; also enforces equal lengths.
    self.assertEqual(len(expected_list), len(actual_list))
    for expected_item, actual_item in zip(expected_list, actual_list):
      self.assertAlmostEqual(expected_item, actual_item, delta=delta)

  def test_build_grid_anchor_generator_with_defaults(self):
    anchor_generator_text_proto = """
      grid_anchor_generator {
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               grid_anchor_generator.GridAnchorGenerator))
    self.assertListEqual(anchor_generator_object._scales, [])
    self.assertListEqual(anchor_generator_object._aspect_ratios, [])
    with self.test_session() as sess:
      base_anchor_size, anchor_offset, anchor_stride = sess.run(
          [anchor_generator_object._base_anchor_size,
           anchor_generator_object._anchor_offset,
           anchor_generator_object._anchor_stride])
    # Defaults from the grid_anchor_generator proto.
    self.assertAllEqual(anchor_offset, [0, 0])
    self.assertAllEqual(anchor_stride, [16, 16])
    self.assertAllEqual(base_anchor_size, [256, 256])

  def test_build_grid_anchor_generator_with_non_default_parameters(self):
    anchor_generator_text_proto = """
      grid_anchor_generator {
        height: 128
        width: 512
        height_stride: 10
        width_stride: 20
        height_offset: 30
        width_offset: 40
        scales: [0.4, 2.2]
        aspect_ratios: [0.3, 4.5]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               grid_anchor_generator.GridAnchorGenerator))
    self.assert_almost_list_equal(anchor_generator_object._scales,
                                  [0.4, 2.2])
    self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
                                  [0.3, 4.5])
    with self.test_session() as sess:
      base_anchor_size, anchor_offset, anchor_stride = sess.run(
          [anchor_generator_object._base_anchor_size,
           anchor_generator_object._anchor_offset,
           anchor_generator_object._anchor_stride])
    self.assertAllEqual(anchor_offset, [30, 40])
    self.assertAllEqual(anchor_stride, [10, 20])
    self.assertAllEqual(base_anchor_size, [128, 512])

  def test_build_ssd_anchor_generator_with_defaults(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(
        anchor_generator_object,
        multiple_grid_anchor_generator.MultipleGridAnchorGenerator))
    # First layer has reduced boxes (3 scales) by default; the remaining
    # layers interpolate scales between min_scale and max_scale.
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, 0.2, 0.2),
         (0.35, 0.418),
         (0.499, 0.570),
         (0.649, 0.721),
         (0.799, 0.871),
         (0.949, 0.974)]):
      self.assert_almost_list_equal(expected_scales, actual_scales,
                                    delta=1e-2)
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio,
                                    actual_aspect_ratio)
    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])

  def test_build_ssd_anchor_generator_with_custom_scales(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
        scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
        reduce_boxes_in_lowest_layer: false
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(
        anchor_generator_object,
        multiple_grid_anchor_generator.MultipleGridAnchorGenerator))
    # Each layer pairs its scale with the geometric mean of itself and the
    # next scale (last layer interpolates toward 1.0).
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, math.sqrt(0.1 * 0.15)),
         (0.15, math.sqrt(0.15 * 0.2)),
         (0.2, math.sqrt(0.2 * 0.4)),
         (0.4, math.sqrt(0.4 * 0.6)),
         (0.6, math.sqrt(0.6 * 0.8)),
         (0.8, math.sqrt(0.8 * 1.0))]):
      self.assert_almost_list_equal(expected_scales, actual_scales,
                                    delta=1e-2)

  def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [0.5]
        interpolated_scale_aspect_ratio: 0.5
        reduce_boxes_in_lowest_layer: false
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(
        anchor_generator_object,
        multiple_grid_anchor_generator.MultipleGridAnchorGenerator))
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        6 * [(0.5, 0.5)]):
      self.assert_almost_list_equal(expected_aspect_ratio,
                                    actual_aspect_ratio)

  def test_build_ssd_anchor_generator_without_reduced_boxes(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
        reduce_boxes_in_lowest_layer: false
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(
        anchor_generator_object,
        multiple_grid_anchor_generator.MultipleGridAnchorGenerator))
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.2, 0.264),
         (0.35, 0.418),
         (0.499, 0.570),
         (0.649, 0.721),
         (0.799, 0.871),
         (0.949, 0.974)]):
      self.assert_almost_list_equal(expected_scales, actual_scales,
                                    delta=1e-2)
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        6 * [(1.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio,
                                    actual_aspect_ratio)
    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])

  def test_build_ssd_anchor_generator_with_non_default_parameters(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        num_layers: 2
        min_scale: 0.3
        max_scale: 0.8
        aspect_ratios: [2.0]
        height_stride: 16
        height_stride: 32
        width_stride: 20
        width_stride: 30
        height_offset: 8
        height_offset: 16
        width_offset: 0
        width_offset: 10
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(
        anchor_generator_object,
        multiple_grid_anchor_generator.MultipleGridAnchorGenerator))
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, 0.3, 0.3), (0.8, 0.894)]):
      self.assert_almost_list_equal(expected_scales, actual_scales,
                                    delta=1e-2)
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        [(1.0, 2.0, 0.5), (2.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio,
                                    actual_aspect_ratio)
    for actual_strides, expected_strides in zip(
        list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
      self.assert_almost_list_equal(expected_strides, actual_strides)
    for actual_offsets, expected_offsets in zip(
        list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]):
      self.assert_almost_list_equal(expected_offsets, actual_offsets)
    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])

  def test_raise_value_error_on_empty_anchor_genertor(self):
    # (Method name typo retained: tests are discovered by name.)
    anchor_generator_text_proto = """
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    with self.assertRaises(ValueError):
      anchor_generator_builder.build(anchor_generator_proto)

  def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):
    anchor_generator_text_proto = """
      multiscale_anchor_generator {
        aspect_ratios: [1.0]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(
        anchor_generator_object,
        multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator))
    for level, anchor_grid_info in zip(
        range(3, 8), anchor_generator_object._anchor_grid_info):
      self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info']))
      # BUG FIX: was assertTrue(level, anchor_grid_info['level']) — the
      # second positional argument of assertTrue is the failure *message*,
      # so the original never compared the two values.
      self.assertEqual(level, anchor_grid_info['level'])
      self.assertEqual(len(anchor_grid_info['info']), 4)
      self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])
      # NOTE(review): same assertTrue(msg) misuse — this only checks that
      # info[1] is truthy. Left unchanged because the exact structure of
      # info[1] is not visible here; consider replacing with an exact
      # comparison against the expected aspect ratios.
      self.assertTrue(anchor_grid_info['info'][1], 1.0)
      self.assertAllClose(anchor_grid_info['info'][2],
                          [4.0 * 2**level, 4.0 * 2**level])
      self.assertAllClose(anchor_grid_info['info'][3],
                          [2**level, 2**level])
    self.assertTrue(anchor_generator_object._normalize_coordinates)

  def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(
      self):
    anchor_generator_text_proto = """
      multiscale_anchor_generator {
        aspect_ratios: [1.0]
        normalize_coordinates: false
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(
        anchor_generator_object,
        multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator))
    self.assertFalse(anchor_generator_object._normalize_coordinates)
if __name__ == '__main__':
  # Discovers and runs all TestCase methods in this module via TensorFlow's
  # test runner.
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_coder_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from
object_detection.box_coders
import
faster_rcnn_box_coder
from
object_detection.box_coders
import
keypoint_box_coder
from
object_detection.box_coders
import
mean_stddev_box_coder
from
object_detection.box_coders
import
square_box_coder
from
object_detection.protos
import
box_coder_pb2
def build(box_coder_config):
  """Builds a box coder object based on the box coder config.

  Args:
    box_coder_config: A box_coder.proto object containing the config for the
      desired box coder.

  Returns:
    BoxCoder based on the config.

  Raises:
    ValueError: On empty box coder proto.
  """
  if not isinstance(box_coder_config, box_coder_pb2.BoxCoder):
    raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.')

  # Resolve the oneof once instead of re-querying it in every branch
  # (mirrors the pattern already used in box_predictor_builder).
  box_coder_oneof = box_coder_config.WhichOneof('box_coder_oneof')

  if box_coder_oneof == 'faster_rcnn_box_coder':
    config = box_coder_config.faster_rcnn_box_coder
    return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[
        config.y_scale,
        config.x_scale,
        config.height_scale,
        config.width_scale
    ])
  if box_coder_oneof == 'keypoint_box_coder':
    config = box_coder_config.keypoint_box_coder
    return keypoint_box_coder.KeypointBoxCoder(
        config.num_keypoints,
        scale_factors=[
            config.y_scale,
            config.x_scale,
            config.height_scale,
            config.width_scale
        ])
  if box_coder_oneof == 'mean_stddev_box_coder':
    return mean_stddev_box_coder.MeanStddevBoxCoder(
        stddev=box_coder_config.mean_stddev_box_coder.stddev)
  if box_coder_oneof == 'square_box_coder':
    config = box_coder_config.square_box_coder
    return square_box_coder.SquareBoxCoder(scale_factors=[
        config.y_scale,
        config.x_scale,
        config.length_scale
    ])
  # The oneof was unset: the proto carries no coder configuration at all.
  raise ValueError('Empty box coder.')
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_coder_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_coder_builder."""
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
object_detection.box_coders
import
faster_rcnn_box_coder
from
object_detection.box_coders
import
keypoint_box_coder
from
object_detection.box_coders
import
mean_stddev_box_coder
from
object_detection.box_coders
import
square_box_coder
from
object_detection.builders
import
box_coder_builder
from
object_detection.protos
import
box_coder_pb2
class BoxCoderBuilderTest(tf.test.TestCase):
  """Exercises box_coder_builder.build for every box_coder oneof branch."""

  def _build_coder(self, box_coder_text_proto):
    """Parses a BoxCoder text proto and runs it through the builder.

    Extracted because every test repeated the same parse-and-build
    boilerplate.
    """
    box_coder_proto = box_coder_pb2.BoxCoder()
    text_format.Merge(box_coder_text_proto, box_coder_proto)
    return box_coder_builder.build(box_coder_proto)

  def test_build_faster_rcnn_box_coder_with_defaults(self):
    box_coder_object = self._build_coder("""
      faster_rcnn_box_coder {
      }
    """)
    # assertIsInstance used uniformly (some tests previously used the less
    # informative assertTrue(isinstance(...))).
    self.assertIsInstance(box_coder_object,
                          faster_rcnn_box_coder.FasterRcnnBoxCoder)
    self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])

  def test_build_faster_rcnn_box_coder_with_non_default_parameters(self):
    box_coder_object = self._build_coder("""
      faster_rcnn_box_coder {
        y_scale: 6.0
        x_scale: 3.0
        height_scale: 7.0
        width_scale: 8.0
      }
    """)
    self.assertIsInstance(box_coder_object,
                          faster_rcnn_box_coder.FasterRcnnBoxCoder)
    self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])

  def test_build_keypoint_box_coder_with_defaults(self):
    box_coder_object = self._build_coder("""
      keypoint_box_coder {
      }
    """)
    self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
    self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])

  def test_build_keypoint_box_coder_with_non_default_parameters(self):
    box_coder_object = self._build_coder("""
      keypoint_box_coder {
        num_keypoints: 6
        y_scale: 6.0
        x_scale: 3.0
        height_scale: 7.0
        width_scale: 8.0
      }
    """)
    self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
    self.assertEqual(box_coder_object._num_keypoints, 6)
    self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])

  def test_build_mean_stddev_box_coder(self):
    box_coder_object = self._build_coder("""
      mean_stddev_box_coder {
      }
    """)
    self.assertIsInstance(box_coder_object,
                          mean_stddev_box_coder.MeanStddevBoxCoder)

  def test_build_square_box_coder_with_defaults(self):
    box_coder_object = self._build_coder("""
      square_box_coder {
      }
    """)
    self.assertIsInstance(box_coder_object, square_box_coder.SquareBoxCoder)
    self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0])

  def test_build_square_box_coder_with_non_default_parameters(self):
    box_coder_object = self._build_coder("""
      square_box_coder {
        y_scale: 6.0
        x_scale: 3.0
        length_scale: 7.0
      }
    """)
    self.assertIsInstance(box_coder_object, square_box_coder.SquareBoxCoder)
    self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0])

  def test_raise_error_on_empty_box_coder(self):
    # An empty proto leaves the oneof unset; the builder must reject it.
    with self.assertRaises(ValueError):
      self._build_coder("""
      """)
# Standard TensorFlow test entry point: discovers and runs every
# tf.test.TestCase subclass in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_predictor_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
from
object_detection.core
import
box_predictor
from
object_detection.protos
import
box_predictor_pb2
def build(argscope_fn, box_predictor_config, is_training, num_classes):
  """Builds box predictor based on the configuration.

  Builds box predictor based on the configuration. See box_predictor.proto for
  configurable options. Also, see box_predictor.py for more details.

  Args:
    argscope_fn: A function that takes the following inputs:
        * hyperparams_pb2.Hyperparams proto
        * a boolean indicating if the model is in training mode.
      and returns a tf slim argscope for Conv and FC hyperparameters.
    box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
      configuration.
    is_training: Whether the models is in training mode.
    num_classes: Number of classes to predict.

  Returns:
    box_predictor: box_predictor.BoxPredictor object.

  Raises:
    ValueError: On unknown box predictor.
  """
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')

  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
  # Dispatch to one private helper per predictor flavor; each helper maps its
  # sub-proto onto the matching box_predictor class constructor.
  if box_predictor_oneof == 'convolutional_box_predictor':
    return _build_convolutional_box_predictor(
        argscope_fn, box_predictor_config.convolutional_box_predictor,
        is_training, num_classes)
  if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
    return _build_weight_shared_convolutional_box_predictor(
        argscope_fn,
        box_predictor_config.weight_shared_convolutional_box_predictor,
        is_training, num_classes)
  if box_predictor_oneof == 'mask_rcnn_box_predictor':
    return _build_mask_rcnn_box_predictor(
        argscope_fn, box_predictor_config.mask_rcnn_box_predictor,
        is_training, num_classes)
  if box_predictor_oneof == 'rfcn_box_predictor':
    return _build_rfcn_box_predictor(
        argscope_fn, box_predictor_config.rfcn_box_predictor,
        is_training, num_classes)
  raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))


def _build_convolutional_box_predictor(argscope_fn, conv_box_predictor,
                                       is_training, num_classes):
  """Builds a ConvolutionalBoxPredictor from its sub-proto."""
  conv_hyperparams_fn = argscope_fn(conv_box_predictor.conv_hyperparams,
                                    is_training)
  return box_predictor.ConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      conv_hyperparams_fn=conv_hyperparams_fn,
      min_depth=conv_box_predictor.min_depth,
      max_depth=conv_box_predictor.max_depth,
      num_layers_before_predictor=(
          conv_box_predictor.num_layers_before_predictor),
      use_dropout=conv_box_predictor.use_dropout,
      dropout_keep_prob=conv_box_predictor.dropout_keep_probability,
      kernel_size=conv_box_predictor.kernel_size,
      box_code_size=conv_box_predictor.box_code_size,
      apply_sigmoid_to_scores=conv_box_predictor.apply_sigmoid_to_scores,
      class_prediction_bias_init=(
          conv_box_predictor.class_prediction_bias_init),
      use_depthwise=conv_box_predictor.use_depthwise)


def _build_weight_shared_convolutional_box_predictor(
    argscope_fn, conv_box_predictor, is_training, num_classes):
  """Builds a WeightSharedConvolutionalBoxPredictor from its sub-proto."""
  conv_hyperparams_fn = argscope_fn(conv_box_predictor.conv_hyperparams,
                                    is_training)
  return box_predictor.WeightSharedConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      conv_hyperparams_fn=conv_hyperparams_fn,
      depth=conv_box_predictor.depth,
      num_layers_before_predictor=(
          conv_box_predictor.num_layers_before_predictor),
      kernel_size=conv_box_predictor.kernel_size,
      box_code_size=conv_box_predictor.box_code_size,
      class_prediction_bias_init=conv_box_predictor.class_prediction_bias_init,
      use_dropout=conv_box_predictor.use_dropout,
      dropout_keep_prob=conv_box_predictor.dropout_keep_probability)


def _build_mask_rcnn_box_predictor(argscope_fn, mask_rcnn_box_predictor,
                                   is_training, num_classes):
  """Builds a MaskRCNNBoxPredictor from its sub-proto."""
  fc_hyperparams_fn = argscope_fn(mask_rcnn_box_predictor.fc_hyperparams,
                                  is_training)
  # conv hyperparams are only needed for the optional mask/keypoint branches.
  conv_hyperparams_fn = None
  if mask_rcnn_box_predictor.HasField('conv_hyperparams'):
    conv_hyperparams_fn = argscope_fn(
        mask_rcnn_box_predictor.conv_hyperparams, is_training)
  return box_predictor.MaskRCNNBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      fc_hyperparams_fn=fc_hyperparams_fn,
      use_dropout=mask_rcnn_box_predictor.use_dropout,
      dropout_keep_prob=mask_rcnn_box_predictor.dropout_keep_probability,
      box_code_size=mask_rcnn_box_predictor.box_code_size,
      conv_hyperparams_fn=conv_hyperparams_fn,
      predict_instance_masks=mask_rcnn_box_predictor.predict_instance_masks,
      mask_height=mask_rcnn_box_predictor.mask_height,
      mask_width=mask_rcnn_box_predictor.mask_width,
      mask_prediction_num_conv_layers=(
          mask_rcnn_box_predictor.mask_prediction_num_conv_layers),
      mask_prediction_conv_depth=(
          mask_rcnn_box_predictor.mask_prediction_conv_depth),
      masks_are_class_agnostic=(
          mask_rcnn_box_predictor.masks_are_class_agnostic),
      predict_keypoints=mask_rcnn_box_predictor.predict_keypoints,
      share_box_across_classes=(
          mask_rcnn_box_predictor.share_box_across_classes))


def _build_rfcn_box_predictor(argscope_fn, rfcn_box_predictor, is_training,
                              num_classes):
  """Builds an RfcnBoxPredictor from its sub-proto."""
  conv_hyperparams_fn = argscope_fn(rfcn_box_predictor.conv_hyperparams,
                                    is_training)
  return box_predictor.RfcnBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      conv_hyperparams_fn=conv_hyperparams_fn,
      crop_size=[rfcn_box_predictor.crop_height,
                 rfcn_box_predictor.crop_width],
      num_spatial_bins=[rfcn_box_predictor.num_spatial_bins_height,
                        rfcn_box_predictor.num_spatial_bins_width],
      depth=rfcn_box_predictor.depth,
      box_code_size=rfcn_box_predictor.box_code_size)
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/box_predictor_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import
mock
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
object_detection.builders
import
box_predictor_builder
from
object_detection.builders
import
hyperparams_builder
from
object_detection.protos
import
box_predictor_pb2
from
object_detection.protos
import
hyperparams_pb2
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
  """Covers the convolutional_box_predictor branch of the builder."""

  def test_box_predictor_calls_conv_argscope_fn(self):
    # Fixture with distinctive values so the proto reaching the argscope
    # callback can be recognized.
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams)

    def echo_argscope_builder(conv_hyperparams_arg, is_training):
      # Echo the arguments back so the test can inspect them later.
      return (conv_hyperparams_arg, is_training)

    predictor_config = box_predictor_pb2.BoxPredictor()
    predictor_config.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams)
    predictor = box_predictor_builder.build(
        argscope_fn=echo_argscope_builder,
        box_predictor_config=predictor_config,
        is_training=False,
        num_classes=10)
    # The stored fn is the tuple the echo builder produced.
    (conv_hyperparams_actual, is_training) = predictor._conv_hyperparams_fn
    self.assertAlmostEqual(
        (hyperparams.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         stddev))
    self.assertAlmostEqual(
        (hyperparams.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         mean))
    self.assertEqual(hyperparams.activation, conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_construct_non_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      convolutional_box_predictor {
        min_depth: 2
        max_depth: 16
        num_layers_before_predictor: 2
        use_dropout: false
        dropout_keep_probability: 0.4
        kernel_size: 3
        box_code_size: 3
        apply_sigmoid_to_scores: true
        class_prediction_bias_init: 4.0
        use_depthwise: true
      }
    """
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams)

    def echo_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    predictor_config = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, predictor_config)
    predictor_config.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams)
    predictor = box_predictor_builder.build(
        argscope_fn=echo_argscope_builder,
        box_predictor_config=predictor_config,
        is_training=False,
        num_classes=10)
    # Every explicitly configured field must land on the predictor object.
    self.assertEqual(predictor._min_depth, 2)
    self.assertEqual(predictor._max_depth, 16)
    self.assertEqual(predictor._num_layers_before_predictor, 2)
    self.assertFalse(predictor._use_dropout)
    self.assertAlmostEqual(predictor._dropout_keep_prob, 0.4)
    self.assertTrue(predictor._apply_sigmoid_to_scores)
    self.assertAlmostEqual(predictor._class_prediction_bias_init, 4.0)
    self.assertEqual(predictor.num_classes, 10)
    self.assertFalse(predictor._is_training)
    self.assertTrue(predictor._use_depthwise)

  def test_construct_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer {
            l1_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
      }"""
    predictor_config = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, predictor_config)
    predictor = box_predictor_builder.build(
        argscope_fn=hyperparams_builder.build,
        box_predictor_config=predictor_config,
        is_training=True,
        num_classes=90)
    # Unset fields must fall back to proto defaults.
    self.assertEqual(predictor._min_depth, 0)
    self.assertEqual(predictor._max_depth, 0)
    self.assertEqual(predictor._num_layers_before_predictor, 0)
    self.assertTrue(predictor._use_dropout)
    self.assertAlmostEqual(predictor._dropout_keep_prob, 0.8)
    self.assertFalse(predictor._apply_sigmoid_to_scores)
    self.assertEqual(predictor.num_classes, 90)
    self.assertTrue(predictor._is_training)
    self.assertFalse(predictor._use_depthwise)
class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
  """Covers the weight_shared_convolutional_box_predictor builder branch."""

  def test_box_predictor_calls_conv_argscope_fn(self):
    # Distinctive fixture values let us verify the proto handed to the
    # argscope callback is the configured one.
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams)

    def echo_argscope_builder(conv_hyperparams_arg, is_training):
      # Pass the inputs straight through for later inspection.
      return (conv_hyperparams_arg, is_training)

    predictor_config = box_predictor_pb2.BoxPredictor()
    (predictor_config.weight_shared_convolutional_box_predictor
     .conv_hyperparams.CopyFrom(hyperparams))
    predictor = box_predictor_builder.build(
        argscope_fn=echo_argscope_builder,
        box_predictor_config=predictor_config,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = predictor._conv_hyperparams_fn
    self.assertAlmostEqual(
        (hyperparams.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         stddev))
    self.assertAlmostEqual(
        (hyperparams.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         mean))
    self.assertEqual(hyperparams.activation, conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_construct_non_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      weight_shared_convolutional_box_predictor {
        depth: 2
        num_layers_before_predictor: 2
        kernel_size: 7
        box_code_size: 3
        class_prediction_bias_init: 4.0
      }
    """
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams)

    def echo_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    predictor_config = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, predictor_config)
    (predictor_config.weight_shared_convolutional_box_predictor
     .conv_hyperparams.CopyFrom(hyperparams))
    predictor = box_predictor_builder.build(
        argscope_fn=echo_argscope_builder,
        box_predictor_config=predictor_config,
        is_training=False,
        num_classes=10)
    # Configured fields must be reflected on the constructed predictor.
    self.assertEqual(predictor._depth, 2)
    self.assertEqual(predictor._num_layers_before_predictor, 2)
    self.assertAlmostEqual(predictor._class_prediction_bias_init, 4.0)
    self.assertEqual(predictor.num_classes, 10)
    self.assertFalse(predictor._is_training)

  def test_construct_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      weight_shared_convolutional_box_predictor {
        conv_hyperparams {
          regularizer {
            l1_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
      }"""
    predictor_config = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, predictor_config)
    predictor = box_predictor_builder.build(
        argscope_fn=hyperparams_builder.build,
        box_predictor_config=predictor_config,
        is_training=True,
        num_classes=90)
    # Unset fields must come back as proto defaults.
    self.assertEqual(predictor._depth, 0)
    self.assertEqual(predictor._num_layers_before_predictor, 0)
    self.assertEqual(predictor.num_classes, 90)
    self.assertTrue(predictor._is_training)
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
  """Covers the mask_rcnn_box_predictor branch of the builder."""

  def test_box_predictor_builder_calls_fc_argscope_fn(self):
    fc_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
      op: FC
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    # The builder must forward the fc hyperparams and the training flag to
    # the argscope callback and store its return value.
    mock_argscope_fn.assert_called_with(hyperparams_proto, False)
    self.assertEqual(box_predictor._fc_hyperparams_fn, 'arg_scope')

  def test_non_default_mask_rcnn_box_predictor(self):
    fc_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
      op: FC
    """
    box_predictor_text_proto = """
      mask_rcnn_box_predictor {
        use_dropout: true
        dropout_keep_probability: 0.8
        box_code_size: 3
        share_box_across_classes: true
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)

    def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
      return (fc_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_fc_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertTrue(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 3)
    # assertTrue instead of assertEqual(..., True): clearer intent and
    # failure message for a boolean flag.
    self.assertTrue(box_predictor._share_box_across_classes)

  def test_build_default_mask_rcnn_box_predictor(self):
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock.Mock(return_value='arg_scope'),
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    # Unset fields must fall back to proto defaults; mask and keypoint
    # branches stay disabled.
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertFalse(box_predictor._predict_instance_masks)
    self.assertFalse(box_predictor._predict_keypoints)

  def test_build_box_predictor_with_mask_branch(self):
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
        hyperparams_pb2.Hyperparams.CONV)
    box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
    box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
    box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16
    box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    # With a mask branch present, BOTH the fc and the conv hyperparams must
    # be run through the argscope callback.
    mock_argscope_fn.assert_has_calls(
        [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
                   True),
         mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
                   True)],
        any_order=True)
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertTrue(box_predictor._predict_instance_masks)
    self.assertEqual(box_predictor._mask_prediction_conv_depth, 512)
    self.assertFalse(box_predictor._predict_keypoints)
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
  """Covers the rfcn_box_predictor branch of the builder."""

  def test_box_predictor_calls_fc_argscope_fn(self):
    # Distinctive fixture values make the proto handed to the argscope
    # callback recognizable.
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams)

    def echo_argscope_builder(conv_hyperparams_arg, is_training):
      # Return the inputs untouched so the test can inspect them.
      return (conv_hyperparams_arg, is_training)

    predictor_config = box_predictor_pb2.BoxPredictor()
    predictor_config.rfcn_box_predictor.conv_hyperparams.CopyFrom(hyperparams)
    predictor = box_predictor_builder.build(
        argscope_fn=echo_argscope_builder,
        box_predictor_config=predictor_config,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = predictor._conv_hyperparams_fn
    self.assertAlmostEqual(
        (hyperparams.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         stddev))
    self.assertAlmostEqual(
        (hyperparams.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.truncated_normal_initializer.
         mean))
    self.assertEqual(hyperparams.activation, conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_non_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    box_predictor_text_proto = """
      rfcn_box_predictor {
        num_spatial_bins_height: 4
        num_spatial_bins_width: 4
        depth: 4
        box_code_size: 3
        crop_height: 16
        crop_width: 16
      }
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams)

    def echo_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    predictor_config = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, predictor_config)
    predictor_config.rfcn_box_predictor.conv_hyperparams.CopyFrom(hyperparams)
    predictor = box_predictor_builder.build(
        argscope_fn=echo_argscope_builder,
        box_predictor_config=predictor_config,
        is_training=True,
        num_classes=90)
    # Configured values must land on the predictor, with crop size and
    # spatial bins packed into [height, width] lists.
    self.assertEqual(predictor.num_classes, 90)
    self.assertTrue(predictor._is_training)
    self.assertEqual(predictor._box_code_size, 3)
    self.assertEqual(predictor._num_spatial_bins, [4, 4])
    self.assertEqual(predictor._crop_size, [16, 16])

  def test_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams)

    def echo_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    predictor_config = box_predictor_pb2.BoxPredictor()
    predictor_config.rfcn_box_predictor.conv_hyperparams.CopyFrom(hyperparams)
    predictor = box_predictor_builder.build(
        argscope_fn=echo_argscope_builder,
        box_predictor_config=predictor_config,
        is_training=True,
        num_classes=90)
    # Unset fields must fall back to proto defaults.
    self.assertEqual(predictor.num_classes, 90)
    self.assertTrue(predictor._is_training)
    self.assertEqual(predictor._box_code_size, 4)
    self.assertEqual(predictor._num_spatial_bins, [3, 3])
    self.assertEqual(predictor._crop_size, [12, 12])
# Standard TensorFlow test entry point: discovers and runs every
# tf.test.TestCase subclass in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/dataset_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wishes to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import
functools
import
tensorflow
as
tf
from
object_detection.core
import
standard_fields
as
fields
from
object_detection.data_decoders
import
tf_example_decoder
from
object_detection.protos
import
input_reader_pb2
from
object_detection.utils
import
dataset_util
def _get_padding_shapes(dataset, max_num_boxes=None, num_classes=None,
                        spatial_image_shape=None):
  """Returns shapes to pad dataset tensors to before batching.

  A dimension of `None` in any returned shape means "pad dynamically to the
  largest element in the batch" for that axis.

  Args:
    dataset: tf.data.Dataset object.
    max_num_boxes: Max number of groundtruth boxes needed to computes shapes for
      padding. If None, box-count axes stay dynamic.
    num_classes: Number of classes in the dataset needed to compute shapes for
      padding. If None, class axes stay dynamic.
    spatial_image_shape: A list of two integers of the form [height, width]
      containing expected spatial shape of the image.

  Returns:
    A dictionary keyed by fields.InputDataFields containing padding shapes for
    tensors in the dataset, restricted to keys actually present in the
    dataset's output_shapes.

  Raises:
    ValueError: If groundtruth classes is neither rank 1 nor rank 2.
  """
  # [-1, -1] is treated the same as "unspecified": pad spatial dims
  # dynamically.
  if not spatial_image_shape or spatial_image_shape == [-1, -1]:
    height, width = None, None
  else:
    height, width = spatial_image_shape  # pylint: disable=unpacking-non-sequence

  num_additional_channels = 0
  if fields.InputDataFields.image_additional_channels in dataset.output_shapes:
    # NOTE(review): `.dims[...].value` is the TF1 TensorShape/Dimension API;
    # the channel count is read from the dataset's static shape.
    num_additional_channels = dataset.output_shapes[
        fields.InputDataFields.image_additional_channels].dims[2].value
  padding_shapes = {
      # Additional channels are merged before batching.
      fields.InputDataFields.image: [height, width,
                                     3 + num_additional_channels],
      fields.InputDataFields.image_additional_channels: [
          height, width, num_additional_channels],
      # Scalar string fields: no padding needed.
      fields.InputDataFields.source_id: [],
      fields.InputDataFields.filename: [],
      fields.InputDataFields.key: [],
      # Per-box fields all pad their leading axis to max_num_boxes.
      fields.InputDataFields.groundtruth_difficult: [max_num_boxes],
      fields.InputDataFields.groundtruth_boxes: [max_num_boxes, 4],
      fields.InputDataFields.groundtruth_instance_masks: [max_num_boxes,
                                                          height, width],
      fields.InputDataFields.groundtruth_is_crowd: [max_num_boxes],
      fields.InputDataFields.groundtruth_group_of: [max_num_boxes],
      fields.InputDataFields.groundtruth_area: [max_num_boxes],
      fields.InputDataFields.groundtruth_weights: [max_num_boxes],
      fields.InputDataFields.num_groundtruth_boxes: [],
      fields.InputDataFields.groundtruth_label_types: [max_num_boxes],
      fields.InputDataFields.groundtruth_label_scores: [max_num_boxes],
      fields.InputDataFields.true_image_shape: [3],
      # multiclass_scores carries num_classes + 1 columns (background class
      # included) when num_classes is known.
      fields.InputDataFields.multiclass_scores: [
          max_num_boxes, num_classes + 1 if num_classes is not None else None],
  }
  # Determine whether groundtruth_classes are integers or one-hot encodings,
  # and apply batching appropriately.
  classes_shape = dataset.output_shapes[
      fields.InputDataFields.groundtruth_classes]
  if len(classes_shape) == 1:  # Class integers.
    padding_shapes[fields.InputDataFields.groundtruth_classes] = [
        max_num_boxes]
  elif len(classes_shape) == 2:  # One-hot or k-hot encoding.
    padding_shapes[fields.InputDataFields.groundtruth_classes] = [
        max_num_boxes, num_classes]
  else:
    raise ValueError('Groundtruth classes must be a rank 1 tensor (classes) or '
                     'rank 2 tensor (one-hot encodings)')
  # original_image keeps fully dynamic spatial dims regardless of
  # spatial_image_shape, since it is the un-resized input.
  if fields.InputDataFields.original_image in dataset.output_shapes:
    padding_shapes[fields.InputDataFields.original_image] = [
        None, None, 3 + num_additional_channels]
  # Keypoint shapes are read from the dataset's static shape rather than
  # assumed, since the number of keypoints per box is dataset-specific.
  if fields.InputDataFields.groundtruth_keypoints in dataset.output_shapes:
    tensor_shape = dataset.output_shapes[
        fields.InputDataFields.groundtruth_keypoints]
    padding_shape = [max_num_boxes, tensor_shape[1].value,
                     tensor_shape[2].value]
    padding_shapes[fields.InputDataFields.groundtruth_keypoints] = (
        padding_shape)
  if (fields.InputDataFields.groundtruth_keypoint_visibilities
      in dataset.output_shapes):
    tensor_shape = dataset.output_shapes[
        fields.InputDataFields.groundtruth_keypoint_visibilities]
    padding_shape = [max_num_boxes, tensor_shape[1].value]
    padding_shapes[fields.InputDataFields.
                   groundtruth_keypoint_visibilities] = padding_shape
  # Only return padding shapes for tensors the dataset actually produces.
  return {tensor_key: padding_shapes[tensor_key]
          for tensor_key, _ in dataset.output_shapes.items()}
def build(input_reader_config, transform_input_data_fn=None,
          batch_size=None, max_num_boxes=None, num_classes=None,
          spatial_image_shape=None, num_additional_channels=0):
  """Builds a tf.data.Dataset.

  Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
  records. Applies a padded batch to the resulting dataset.

  Args:
    input_reader_config: A input_reader_pb2.InputReader object.
    transform_input_data_fn: Function to apply to all records, or None if
      no extra decoding is required.
    batch_size: Batch size. If None, batching is not performed.
    max_num_boxes: Max number of groundtruth boxes needed to compute shapes for
      padding. If None, will use a dynamic shape.
    num_classes: Number of classes in the dataset needed to compute shapes for
      padding. If None, will use a dynamic shape.
    spatial_image_shape: A list of two integers of the form [height, width]
      containing expected spatial shape of the image after applying
      transform_input_data_fn. If None, will use dynamic shapes.
    num_additional_channels: Number of additional channels to use in the input.

  Returns:
    A tf.data.Dataset based on the input_reader_config.

  Raises:
    ValueError: On invalid input reader proto.
    ValueError: If no input paths are specified.
  """
  if not isinstance(input_reader_config, input_reader_pb2.InputReader):
    raise ValueError('input_reader_config not of type '
                     'input_reader_pb2.InputReader.')
  # Only the tf_record_input_reader oneof is supported; anything else falls
  # through to the ValueError at the bottom.
  if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    config = input_reader_config.tf_record_input_reader
    if not config.input_path:
      raise ValueError('At least one input path must be specified in '
                       '`input_reader_config`.')
    label_map_proto_file = None
    if input_reader_config.HasField('label_map_path'):
      label_map_proto_file = input_reader_config.label_map_path
    decoder = tf_example_decoder.TfExampleDecoder(
        load_instance_masks=input_reader_config.load_instance_masks,
        instance_mask_type=input_reader_config.mask_type,
        label_map_proto_file=label_map_proto_file,
        use_display_name=input_reader_config.use_display_name,
        num_additional_channels=num_additional_channels)

    # Decode each serialized tf.Example, then optionally apply the caller's
    # transform. Defined as a closure so it captures `decoder`.
    def process_fn(value):
      processed = decoder.decode(value)
      if transform_input_data_fn is not None:
        return transform_input_data_fn(processed)
      return processed

    # config.input_path[:] copies the repeated proto field into a plain list.
    # buffer_size is 8 MB per TFRecord reader.
    dataset = dataset_util.read_dataset(
        functools.partial(tf.data.TFRecordDataset,
                          buffer_size=8 * 1000 * 1000),
        process_fn, config.input_path[:], input_reader_config)
    if batch_size:
      padding_shapes = _get_padding_shapes(dataset, max_num_boxes, num_classes,
                                           spatial_image_shape)
      # Partial final batches are dropped so every batch has a static size.
      dataset = dataset.apply(
          tf.contrib.data.padded_batch_and_drop_remainder(batch_size,
                                                          padding_shapes))
    return dataset

  raise ValueError('Unsupported input_reader_config.')
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/dataset_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_builder."""
import
os
import
numpy
as
np
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
tensorflow.core.example
import
example_pb2
from
tensorflow.core.example
import
feature_pb2
from
object_detection.builders
import
dataset_builder
from
object_detection.core
import
standard_fields
as
fields
from
object_detection.protos
import
input_reader_pb2
from
object_detection.utils
import
dataset_util
class DatasetBuilderTest(tf.test.TestCase):
  """Tests for dataset_builder.build on TFRecord inputs.

  Improvements over the original: the deprecated `assertEquals` alias is
  replaced with `assertEqual`, `assertTrue(x not in d)` becomes
  `assertNotIn`, and the repeated InputReader text-proto parsing is factored
  into the `_parse_input_reader` helper.
  """

  def _parse_input_reader(self, text_proto):
    """Parses an InputReader text proto into an input_reader_pb2.InputReader."""
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(text_proto, input_reader_proto)
    return input_reader_proto

  def create_tf_record(self, has_additional_channels=False):
    """Writes a single-example TFRecord fixture and returns its path.

    The example is a random 4x5x3 JPEG with one full-image box of class 2 and
    a flat all-ones mask; when `has_additional_channels` is set, two extra
    encoded single-channel images are attached.
    """
    path = os.path.join(self.get_temp_dir(), 'tfrecord')
    writer = tf.python_io.TFRecordWriter(path)
    image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    additional_channels_tensor = np.random.randint(
        255, size=(4, 5, 1)).astype(np.uint8)
    flat_mask = (4 * 5) * [1.0]
    with self.test_session():
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
      encoded_additional_channels_jpeg = tf.image.encode_jpeg(
          tf.constant(additional_channels_tensor)).eval()
    features = {
        'image/encoded':
            feature_pb2.Feature(
                bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])),
        'image/format':
            feature_pb2.Feature(
                bytes_list=feature_pb2.BytesList(
                    value=['jpeg'.encode('utf-8')])),
        'image/height':
            feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[4])),
        'image/width':
            feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[5])),
        'image/object/bbox/xmin':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/xmax':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/bbox/ymin':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/ymax':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/class/label':
            feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[2])),
        'image/object/mask':
            feature_pb2.Feature(
                float_list=feature_pb2.FloatList(value=flat_mask)),
    }
    if has_additional_channels:
      features['image/additional_channels/encoded'] = feature_pb2.Feature(
          bytes_list=feature_pb2.BytesList(
              value=[encoded_additional_channels_jpeg] * 2))
    example = example_pb2.Example(
        features=feature_pb2.Features(feature=features))
    writer.write(example.SerializeToString())
    writer.close()
    return path

  def test_build_tf_record_input_reader(self):
    """Decodes one record without masks and checks shapes and values."""
    tf_record_path = self.create_tf_record()
    input_reader_proto = self._parse_input_reader("""
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path))
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()
    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    # Masks were not requested, so they must not appear in the output.
    self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
                     output_dict)
    self.assertEqual((1, 4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual(
        [[2]], output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 1, 4),
        output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0][0])

  def test_build_tf_record_input_reader_and_load_instance_masks(self):
    """Decodes one record with load_instance_masks and checks mask shape."""
    tf_record_path = self.create_tf_record()
    input_reader_proto = self._parse_input_reader("""
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path))
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()
    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)

  def test_build_tf_record_input_reader_with_batch_size_two(self):
    """Batches two (identical) records with static padding shapes."""
    tf_record_path = self.create_tf_record()
    input_reader_proto = self._parse_input_reader("""
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path))

    def one_hot_class_encoding_fn(tensor_dict):
      # Labels are 1-based in the fixture; shift to 0-based before one-hot.
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1,
          depth=3)
      return tensor_dict

    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2,
            max_num_boxes=2,
            num_classes=3,
            spatial_image_shape=[4, 5])).get_next()
    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual([2, 4, 5, 3],
                        output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual(
        [2, 2, 3],
        output_dict[fields.InputDataFields.groundtruth_classes].shape)
    self.assertAllEqual(
        [2, 2, 4],
        output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    # The second box slot per example is zero padding.
    self.assertAllEqual(
        [[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
         [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]],
        output_dict[fields.InputDataFields.groundtruth_boxes])

  def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self):
    """Batches two records with masks and checks padded mask shape."""
    tf_record_path = self.create_tf_record()
    input_reader_proto = self._parse_input_reader("""
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path))

    def one_hot_class_encoding_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1,
          depth=3)
      return tensor_dict

    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2,
            max_num_boxes=2,
            num_classes=3,
            spatial_image_shape=[4, 5])).get_next()
    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        [2, 2, 4, 5],
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)

  def test_build_tf_record_input_reader_with_additional_channels(self):
    """Checks extra channels are merged into the image's channel axis."""
    tf_record_path = self.create_tf_record(has_additional_channels=True)
    input_reader_proto = self._parse_input_reader("""
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path))
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto, batch_size=2,
            num_additional_channels=2)).get_next()
    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    # 3 RGB channels + 2 additional channels = 5.
    self.assertEqual((2, 4, 5, 5),
                     output_dict[fields.InputDataFields.image].shape)

  def test_raises_error_with_no_input_paths(self):
    """build() must reject a tf_record-less config with ValueError."""
    input_reader_proto = self._parse_input_reader("""
      shuffle: false
      num_readers: 1
      load_instance_masks: true
    """)
    with self.assertRaises(ValueError):
      dataset_builder.build(input_reader_proto)
# Allow running this test file directly; tf.test.main() discovers and runs
# every TestCase defined in the module.
if __name__ == '__main__':
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/graph_rewriter_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for quantized training and evaluation."""
import
tensorflow
as
tf
def build(graph_rewriter_config, is_training):
  """Returns a function that modifies default graph based on options.

  Args:
    graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
    is_training: whether in training or eval mode.

  Returns:
    A zero-argument function that, when called, rewrites the current default
    graph in place by inserting quantization ops.

  Raises nothing itself; the returned function raises ValueError if the
  config requests a bit width other than 8.
  """
  def graph_rewrite_fn():
    """Function to quantize weights and activation of the default graph."""
    if (graph_rewriter_config.quantization.weight_bits != 8 or
        graph_rewriter_config.quantization.activation_bits != 8):
      raise ValueError('Only 8bit quantization is supported')

    # Quantize the graph by inserting quantize ops for weights and activations
    if is_training:
      # quant_delay postpones quantization until that many training steps
      # have run.
      tf.contrib.quantize.create_training_graph(
          input_graph=tf.get_default_graph(),
          quant_delay=graph_rewriter_config.quantization.delay)
    else:
      tf.contrib.quantize.create_eval_graph(
          input_graph=tf.get_default_graph())
    # Export summaries for all variables in the 'quant_vars' collection.
    tf.contrib.layers.summarize_collection('quant_vars')

  return graph_rewrite_fn
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/graph_rewriter_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_rewriter_builder."""
import
mock
import
tensorflow
as
tf
from
object_detection.builders
import
graph_rewriter_builder
from
object_detection.protos
import
graph_rewriter_pb2
class QuantizationBuilderTest(tf.test.TestCase):
  """Checks graph_rewriter_builder.build forwards config to tf.contrib.quantize."""

  def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
    """Training mode must call create_training_graph with graph and delay."""
    with mock.patch.object(
        tf.contrib.quantize, 'create_training_graph') as mock_quant_fn:
      with mock.patch.object(tf.contrib.layers,
                             'summarize_collection') as mock_summarize_col:
        graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
        graph_rewriter_proto.quantization.delay = 10
        graph_rewriter_proto.quantization.weight_bits = 8
        graph_rewriter_proto.quantization.activation_bits = 8
        graph_rewrite_fn = graph_rewriter_builder.build(
            graph_rewriter_proto, is_training=True)
        graph_rewrite_fn()
        # Inspect the keyword arguments the builder passed to the mock.
        _, kwargs = mock_quant_fn.call_args
        self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
        self.assertEqual(kwargs['quant_delay'], 10)
        mock_summarize_col.assert_called_with('quant_vars')

  def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
    """Eval mode must call create_eval_graph (no quant_delay argument)."""
    with mock.patch.object(tf.contrib.quantize,
                           'create_eval_graph') as mock_quant_fn:
      with mock.patch.object(tf.contrib.layers,
                             'summarize_collection') as mock_summarize_col:
        graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
        graph_rewriter_proto.quantization.delay = 10
        graph_rewrite_fn = graph_rewriter_builder.build(
            graph_rewriter_proto, is_training=False)
        graph_rewrite_fn()
        _, kwargs = mock_quant_fn.call_args
        self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
        mock_summarize_col.assert_called_with('quant_vars')
# Allow running this test file directly; tf.test.main() discovers and runs
# every TestCase defined in the module.
if __name__ == '__main__':
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/hyperparams_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import
tensorflow
as
tf
from
object_detection.protos
import
hyperparams_pb2
from
object_detection.utils
import
context_manager
# Shorthand alias for the tf-slim layers API used throughout this module.
slim = tf.contrib.slim
def build(hyperparams_config, is_training):
  """Builds tf-slim arg_scope for convolution ops based on the config.

  Returns an arg_scope to use for convolution ops containing weights
  initializer, weights regularizer, activation function, batch norm function
  and batch norm parameters based on the configuration.

  Note that if the batch_norm parameteres are not specified in the config
  (i.e. left to default) then batch norm is excluded from the arg_scope.

  The batch norm parameters are set for updates based on `is_training` argument
  and conv_hyperparams_config.batch_norm.train parameter. During training, they
  are updated only if batch_norm.train parameter is true. However, during eval,
  no updates are made to the batch norm variables. In both cases, their current
  values are used during forward pass.

  Args:
    hyperparams_config: hyperparams.proto object containing
      hyperparameters.
    is_training: Whether the network is in training mode.

  Returns:
    arg_scope_fn: A function to construct tf-slim arg_scope containing
      hyperparameters for ops.

  Raises:
    ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
  """
  if not isinstance(hyperparams_config,
                    hyperparams_pb2.Hyperparams):
    raise ValueError('hyperparams_config not of type '
                     'hyperparams_pb.Hyperparams.')
  batch_norm = None
  batch_norm_params = None
  if hyperparams_config.HasField('batch_norm'):
    batch_norm = slim.batch_norm
    batch_norm_params = _build_batch_norm_params(
        hyperparams_config.batch_norm, is_training)
  # By default the hyperparams apply to the conv family of ops; the FC op
  # setting redirects them to fully_connected instead.
  affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
  if hyperparams_config.HasField('op') and (
      hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
    affected_ops = [slim.fully_connected]
  def scope_fn():
    # Outer scope configures batch_norm only when batch norm params were
    # built; otherwise a no-op context manager keeps the nesting uniform.
    with (slim.arg_scope([slim.batch_norm], **batch_norm_params)
          if batch_norm_params is not None
          else context_manager.IdentityContextManager()):
      with slim.arg_scope(
          affected_ops,
          weights_regularizer=_build_regularizer(
              hyperparams_config.regularizer),
          weights_initializer=_build_initializer(
              hyperparams_config.initializer),
          activation_fn=_build_activation_fn(hyperparams_config.activation),
          normalizer_fn=batch_norm) as sc:
        return sc
  return scope_fn
def _build_activation_fn(activation_fn):
  """Builds a callable activation from config.

  Args:
    activation_fn: hyperparams_pb2.Hyperparams.activation

  Returns:
    Callable activation function (or None for the identity activation).

  Raises:
    ValueError: On unknown activation function.
  """
  # Dispatch table from proto enum value to the corresponding tf op.
  # NONE legitimately maps to None (no activation), so membership is checked
  # explicitly rather than via dict.get.
  known_activations = {
      hyperparams_pb2.Hyperparams.NONE: None,
      hyperparams_pb2.Hyperparams.RELU: tf.nn.relu,
      hyperparams_pb2.Hyperparams.RELU_6: tf.nn.relu6,
  }
  if activation_fn in known_activations:
    return known_activations[activation_fn]
  raise ValueError('Unknown activation function: {}'.format(activation_fn))
def _build_regularizer(regularizer):
  """Builds a tf-slim regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    tf-slim regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  selected = regularizer.WhichOneof('regularizer_oneof')
  if selected == 'l1_regularizer':
    l1_scale = float(regularizer.l1_regularizer.weight)
    return slim.l1_regularizer(scale=l1_scale)
  if selected == 'l2_regularizer':
    l2_scale = float(regularizer.l2_regularizer.weight)
    return slim.l2_regularizer(scale=l2_scale)
  raise ValueError('Unknown regularizer function: {}'.format(selected))
def _build_initializer(initializer):
  """Build a tf initializer from config.

  Args:
    initializer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    tf initializer.

  Raises:
    ValueError: On unknown initializer.
  """
  initializer_oneof = initializer.WhichOneof('initializer_oneof')
  if initializer_oneof == 'truncated_normal_initializer':
    return tf.truncated_normal_initializer(
        mean=initializer.truncated_normal_initializer.mean,
        stddev=initializer.truncated_normal_initializer.stddev)
  if initializer_oneof == 'random_normal_initializer':
    return tf.random_normal_initializer(
        mean=initializer.random_normal_initializer.mean,
        stddev=initializer.random_normal_initializer.stddev)
  if initializer_oneof == 'variance_scaling_initializer':
    # slim's initializer takes the mode as a string (e.g. 'FAN_IN'), so the
    # proto enum number is translated back to its name via the descriptor.
    enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
                       DESCRIPTOR.enum_types_by_name['Mode'])
    mode = enum_descriptor.values_by_number[initializer.
                                            variance_scaling_initializer.
                                            mode].name
    return slim.variance_scaling_initializer(
        factor=initializer.variance_scaling_initializer.factor,
        mode=mode,
        uniform=initializer.variance_scaling_initializer.uniform)
  raise ValueError('Unknown initializer function: {}'.format(
      initializer_oneof))
def
_build_batch_norm_params
(
batch_norm
,
is_training
):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
is_training: Whether the models is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params
=
{
'decay'
:
batch_norm
.
decay
,
'center'
:
batch_norm
.
center
,
'scale'
:
batch_norm
.
scale
,
'epsilon'
:
batch_norm
.
epsilon
,
# Remove is_training parameter from here and deprecate it in the proto
# once we refactor Faster RCNN models to set is_training through an outer
# arg_scope in the meta architecture.
'is_training'
:
is_training
and
batch_norm
.
train
,
}
return
batch_norm_params
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/hyperparams_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import
numpy
as
np
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
object_detection.builders
import
hyperparams_builder
from
object_detection.protos
import
hyperparams_pb2
# Shorthand alias for the tf-slim layers API used throughout this module.
slim = tf.contrib.slim
def
_get_scope_key
(
op
):
return
getattr
(
op
,
'_key_op'
,
str
(
op
))
class
HyperparamsBuilderTest
(
tf
.
test
.
TestCase
):
def test_default_arg_scope_has_conv2d_op(self):
  """By default the built arg_scope must configure slim.conv2d.

  Uses assertIn instead of assertTrue(... in ...) for a clearer failure
  message listing the scope's actual keys.
  """
  conv_hyperparams_text_proto = """
    regularizer {
      l1_regularizer {
      }
    }
    initializer {
      truncated_normal_initializer {
      }
    }
  """
  conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
  scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
                                       is_training=True)
  scope = scope_fn()
  self.assertIn(_get_scope_key(slim.conv2d), scope)
def test_default_arg_scope_has_separable_conv2d_op(self):
  """By default the built arg_scope must configure slim.separable_conv2d.

  Uses assertIn instead of assertTrue(... in ...) for a clearer failure
  message listing the scope's actual keys.
  """
  conv_hyperparams_text_proto = """
    regularizer {
      l1_regularizer {
      }
    }
    initializer {
      truncated_normal_initializer {
      }
    }
  """
  conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
  scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
                                       is_training=True)
  scope = scope_fn()
  self.assertIn(_get_scope_key(slim.separable_conv2d), scope)
def test_default_arg_scope_has_conv2d_transpose_op(self):
  """By default the built arg_scope must configure slim.conv2d_transpose.

  Uses assertIn instead of assertTrue(... in ...) for a clearer failure
  message listing the scope's actual keys.
  """
  conv_hyperparams_text_proto = """
    regularizer {
      l1_regularizer {
      }
    }
    initializer {
      truncated_normal_initializer {
      }
    }
  """
  conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
  scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
                                       is_training=True)
  scope = scope_fn()
  self.assertIn(_get_scope_key(slim.conv2d_transpose), scope)
def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
  """With `op: FC` the arg_scope must target slim.fully_connected instead.

  Uses assertIn instead of assertTrue(... in ...) for a clearer failure
  message listing the scope's actual keys.
  """
  conv_hyperparams_text_proto = """
    op: FC
    regularizer {
      l1_regularizer {
      }
    }
    initializer {
      truncated_normal_initializer {
      }
    }
  """
  conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
  scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
                                       is_training=True)
  scope = scope_fn()
  self.assertIn(_get_scope_key(slim.fully_connected), scope)
def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
  """All three default conv-family ops must share identical hyperparams."""
  conv_hyperparams_text_proto = """
    regularizer {
      l1_regularizer {
      }
    }
    initializer {
      truncated_normal_initializer {
      }
    }
  """
  conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
  scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
                                       is_training=True)
  scope = scope_fn()
  # The scope maps exactly three ops (conv2d, separable_conv2d,
  # conv2d_transpose) to their kwargs; unpack and compare pairwise.
  kwargs_1, kwargs_2, kwargs_3 = scope.values()
  self.assertDictEqual(kwargs_1, kwargs_2)
  self.assertDictEqual(kwargs_1, kwargs_3)
def test_return_l1_regularized_weights(self):
  """The configured l1 regularizer must compute weight * sum(|w|).

  Fix: `scope.values()[0]` only works on Python 2, where dict.values()
  returns a list; on Python 3 it is a non-indexable view. Wrapping in
  list() works on both.
  """
  conv_hyperparams_text_proto = """
    regularizer {
      l1_regularizer {
        weight: 0.5
      }
    }
    initializer {
      truncated_normal_initializer {
      }
    }
  """
  conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
  scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
                                       is_training=True)
  scope = scope_fn()
  conv_scope_arguments = list(scope.values())[0]
  regularizer = conv_scope_arguments['weights_regularizer']
  weights = np.array([1., -1, 4., 2.])
  with self.test_session() as sess:
    result = sess.run(regularizer(tf.constant(weights)))
    # l1(w) = 0.5 * sum(|w|)
    self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def
test_return_l2_regularizer_weights
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
regularizer
=
conv_scope_arguments
[
'weights_regularizer'
]
weights
=
np
.
array
([
1.
,
-
1
,
4.
,
2.
])
with
self
.
test_session
()
as
sess
:
result
=
sess
.
run
(
regularizer
(
tf
.
constant
(
weights
)))
self
.
assertAllClose
(
np
.
power
(
weights
,
2
).
sum
()
/
2.0
*
0.42
,
result
)
def
test_return_non_default_batch_norm_params_with_train_during_train
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
self
.
assertEqual
(
conv_scope_arguments
[
'normalizer_fn'
],
slim
.
batch_norm
)
batch_norm_params
=
scope
[
_get_scope_key
(
slim
.
batch_norm
)]
self
.
assertAlmostEqual
(
batch_norm_params
[
'decay'
],
0.7
)
self
.
assertAlmostEqual
(
batch_norm_params
[
'epsilon'
],
0.03
)
self
.
assertFalse
(
batch_norm_params
[
'center'
])
self
.
assertTrue
(
batch_norm_params
[
'scale'
])
self
.
assertTrue
(
batch_norm_params
[
'is_training'
])
def
test_return_batch_norm_params_with_notrain_during_eval
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
False
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
self
.
assertEqual
(
conv_scope_arguments
[
'normalizer_fn'
],
slim
.
batch_norm
)
batch_norm_params
=
scope
[
_get_scope_key
(
slim
.
batch_norm
)]
self
.
assertAlmostEqual
(
batch_norm_params
[
'decay'
],
0.7
)
self
.
assertAlmostEqual
(
batch_norm_params
[
'epsilon'
],
0.03
)
self
.
assertFalse
(
batch_norm_params
[
'center'
])
self
.
assertTrue
(
batch_norm_params
[
'scale'
])
self
.
assertFalse
(
batch_norm_params
[
'is_training'
])
def
test_return_batch_norm_params_with_notrain_when_train_is_false
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: false
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
self
.
assertEqual
(
conv_scope_arguments
[
'normalizer_fn'
],
slim
.
batch_norm
)
batch_norm_params
=
scope
[
_get_scope_key
(
slim
.
batch_norm
)]
self
.
assertAlmostEqual
(
batch_norm_params
[
'decay'
],
0.7
)
self
.
assertAlmostEqual
(
batch_norm_params
[
'epsilon'
],
0.03
)
self
.
assertFalse
(
batch_norm_params
[
'center'
])
self
.
assertTrue
(
batch_norm_params
[
'scale'
])
self
.
assertFalse
(
batch_norm_params
[
'is_training'
])
def
test_do_not_use_batch_norm_if_default
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
self
.
assertEqual
(
conv_scope_arguments
[
'normalizer_fn'
],
None
)
def
test_use_none_activation
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
self
.
assertEqual
(
conv_scope_arguments
[
'activation_fn'
],
None
)
def
test_use_relu_activation
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
self
.
assertEqual
(
conv_scope_arguments
[
'activation_fn'
],
tf
.
nn
.
relu
)
def
test_use_relu_6_activation
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
self
.
assertEqual
(
conv_scope_arguments
[
'activation_fn'
],
tf
.
nn
.
relu6
)
def
_assert_variance_in_range
(
self
,
initializer
,
shape
,
variance
,
tol
=
1e-2
):
with
tf
.
Graph
().
as_default
()
as
g
:
with
self
.
test_session
(
graph
=
g
)
as
sess
:
var
=
tf
.
get_variable
(
name
=
'test'
,
shape
=
shape
,
dtype
=
tf
.
float32
,
initializer
=
initializer
)
sess
.
run
(
tf
.
global_variables_initializer
())
values
=
sess
.
run
(
var
)
self
.
assertAllClose
(
np
.
var
(
values
),
variance
,
tol
,
tol
)
def
test_variance_in_range_with_variance_scaling_initializer_fan_in
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
initializer
=
conv_scope_arguments
[
'weights_initializer'
]
self
.
_assert_variance_in_range
(
initializer
,
shape
=
[
100
,
40
],
variance
=
2.
/
100.
)
def
test_variance_in_range_with_variance_scaling_initializer_fan_out
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
initializer
=
conv_scope_arguments
[
'weights_initializer'
]
self
.
_assert_variance_in_range
(
initializer
,
shape
=
[
100
,
40
],
variance
=
2.
/
40.
)
def
test_variance_in_range_with_variance_scaling_initializer_fan_avg
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
initializer
=
conv_scope_arguments
[
'weights_initializer'
]
self
.
_assert_variance_in_range
(
initializer
,
shape
=
[
100
,
40
],
variance
=
4.
/
(
100.
+
40.
))
def
test_variance_in_range_with_variance_scaling_initializer_uniform
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
initializer
=
conv_scope_arguments
[
'weights_initializer'
]
self
.
_assert_variance_in_range
(
initializer
,
shape
=
[
100
,
40
],
variance
=
2.
/
100.
)
def
test_variance_in_range_with_truncated_normal_initializer
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
initializer
=
conv_scope_arguments
[
'weights_initializer'
]
self
.
_assert_variance_in_range
(
initializer
,
shape
=
[
100
,
40
],
variance
=
0.49
,
tol
=
1e-1
)
def
test_variance_in_range_with_random_normal_initializer
(
self
):
conv_hyperparams_text_proto
=
"""
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto
=
hyperparams_pb2
.
Hyperparams
()
text_format
.
Merge
(
conv_hyperparams_text_proto
,
conv_hyperparams_proto
)
scope_fn
=
hyperparams_builder
.
build
(
conv_hyperparams_proto
,
is_training
=
True
)
scope
=
scope_fn
()
conv_scope_arguments
=
scope
[
_get_scope_key
(
slim
.
conv2d
)]
initializer
=
conv_scope_arguments
[
'weights_initializer'
]
self
.
_assert_variance_in_range
(
initializer
,
shape
=
[
100
,
40
],
variance
=
0.64
,
tol
=
1e-1
)
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/image_resizer_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import
functools
import
tensorflow
as
tf
from
object_detection.core
import
preprocessor
from
object_detection.protos
import
image_resizer_pb2
def _tf_resize_method(resize_method):
  """Translates an image_resizer proto resize enum to a tf ResizeMethod.

  Args:
    resize_method: the resize_method enum value from a
      keep_aspect_ratio_resizer or fixed_shape_resizer proto.

  Returns:
    The matching tf.image.ResizeMethod value.

  Raises:
    ValueError: if `resize_method` is not a recognized enum value.
  """
  proto_to_tf_method = {
      image_resizer_pb2.BILINEAR:
          tf.image.ResizeMethod.BILINEAR,
      image_resizer_pb2.NEAREST_NEIGHBOR:
          tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      image_resizer_pb2.BICUBIC:
          tf.image.ResizeMethod.BICUBIC,
      image_resizer_pb2.AREA:
          tf.image.ResizeMethod.AREA,
  }
  # Membership test rather than .get(): BILINEAR maps to a falsy enum value,
  # so a truthiness check on the lookup result would be wrong.
  if resize_method not in proto_to_tf_method:
    raise ValueError('Unknown resize_method')
  return proto_to_tf_method[resize_method]
def build(image_resizer_config):
  """Builds callable for image resizing operations.

  Args:
    image_resizer_config: image_resizer.proto object containing parameters for
      an image resizing operation.

  Returns:
    image_resizer_fn: Callable for image resizing. This callable always takes
      a rank-3 image tensor (corresponding to a single image) and returns a
      rank-3 image tensor, possibly with new spatial dimensions.

  Raises:
    ValueError: if `image_resizer_config` is of incorrect type.
    ValueError: if `image_resizer_config.image_resizer_oneof` is of an
      unexpected type.
    ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
      is used.
  """
  if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
    raise ValueError('image_resizer_config not of type '
                     'image_resizer_pb2.ImageResizer.')

  image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
  if image_resizer_oneof == 'keep_aspect_ratio_resizer':
    keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
    if not (keep_aspect_ratio_config.min_dimension <=
            keep_aspect_ratio_config.max_dimension):
      raise ValueError('min_dimension > max_dimension')
    method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
    # Default pad value is black; override from the proto if provided.
    per_channel_pad_value = (0, 0, 0)
    if keep_aspect_ratio_config.per_channel_pad_value:
      per_channel_pad_value = tuple(
          keep_aspect_ratio_config.per_channel_pad_value)
    image_resizer_fn = functools.partial(
        preprocessor.resize_to_range,
        min_dimension=keep_aspect_ratio_config.min_dimension,
        max_dimension=keep_aspect_ratio_config.max_dimension,
        method=method,
        pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension,
        per_channel_pad_value=per_channel_pad_value)
    # Without grayscale conversion the resizer is returned directly;
    # otherwise execution falls through to the grayscale wrapper below.
    if not keep_aspect_ratio_config.convert_to_grayscale:
      return image_resizer_fn
  elif image_resizer_oneof == 'fixed_shape_resizer':
    fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
    method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
    image_resizer_fn = functools.partial(
        preprocessor.resize_image,
        new_height=fixed_shape_resizer_config.height,
        new_width=fixed_shape_resizer_config.width,
        method=method)
    if not fixed_shape_resizer_config.convert_to_grayscale:
      return image_resizer_fn
  else:
    raise ValueError(
        'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)

  # Only reached when convert_to_grayscale is set: wrap the resizer so its
  # output is converted to a single-channel image (shape's last dim becomes 1).
  def grayscale_image_resizer(image):
    [resized_image, resized_image_shape] = image_resizer_fn(image)
    grayscale_image = preprocessor.rgb_to_gray(resized_image)
    grayscale_image_shape = tf.concat([resized_image_shape[:-1], [1]], 0)
    return [grayscale_image, grayscale_image_shape]

  return functools.partial(grayscale_image_resizer)
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/image_resizer_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.builders.image_resizer_builder."""
import
numpy
as
np
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
object_detection.builders
import
image_resizer_builder
from
object_detection.protos
import
image_resizer_pb2
class ImageResizerBuilderTest(tf.test.TestCase):
  """Tests for image_resizer_builder.build."""

  def _shape_of_resized_random_image_given_text_proto(self, input_shape,
                                                      text_proto):
    """Builds a resizer from `text_proto` and returns the resized shape.

    Args:
      input_shape: shape tuple for the random test image.
      text_proto: text-format ImageResizer proto describing the resizer.

    Returns:
      Shape tuple of the resized image as evaluated in a session.
    """
    image_resizer_config = image_resizer_pb2.ImageResizer()
    text_format.Merge(text_proto, image_resizer_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
    images = tf.to_float(tf.random_uniform(input_shape, minval=0, maxval=255,
                                           dtype=tf.int32))
    resized_images, _ = image_resizer_fn(images)
    with self.test_session() as sess:
      return sess.run(resized_images).shape

  def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self):
    """A 50x25 image resized to min 10 / max 20 should become 20x10."""
    image_resizer_text_proto = """
      keep_aspect_ratio_resizer {
        min_dimension: 10
        max_dimension: 20
      }
    """
    input_shape = (50, 25, 3)
    expected_output_shape = (20, 10, 3)
    output_shape = self._shape_of_resized_random_image_given_text_proto(
        input_shape, image_resizer_text_proto)
    self.assertEqual(output_shape, expected_output_shape)

  def test_build_keep_aspect_ratio_resizer_with_padding(self):
    """Padding to max dimension should produce a square 20x20 output."""
    image_resizer_text_proto = """
      keep_aspect_ratio_resizer {
        min_dimension: 10
        max_dimension: 20
        pad_to_max_dimension: true
        per_channel_pad_value: 3
        per_channel_pad_value: 4
        per_channel_pad_value: 5
      }
    """
    input_shape = (50, 25, 3)
    expected_output_shape = (20, 20, 3)
    output_shape = self._shape_of_resized_random_image_given_text_proto(
        input_shape, image_resizer_text_proto)
    self.assertEqual(output_shape, expected_output_shape)

  def test_built_fixed_shape_resizer_returns_expected_shape(self):
    """A fixed-shape resizer should produce exactly height x width."""
    image_resizer_text_proto = """
      fixed_shape_resizer {
        height: 10
        width: 20
      }
    """
    input_shape = (50, 25, 3)
    expected_output_shape = (10, 20, 3)
    output_shape = self._shape_of_resized_random_image_given_text_proto(
        input_shape, image_resizer_text_proto)
    self.assertEqual(output_shape, expected_output_shape)

  def test_raises_error_on_invalid_input(self):
    """A non-proto argument should raise ValueError."""
    invalid_input = 'invalid_input'
    with self.assertRaises(ValueError):
      image_resizer_builder.build(invalid_input)

  def _resized_image_given_text_proto(self, image, text_proto):
    """Builds a resizer from `text_proto` and applies it to `image`.

    Args:
      image: a numpy image batch of shape [1, height, width, 3].
      text_proto: text-format ImageResizer proto describing the resizer.

    Returns:
      The resized image as a numpy array.
    """
    image_resizer_config = image_resizer_pb2.ImageResizer()
    text_format.Merge(text_proto, image_resizer_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
    image_placeholder = tf.placeholder(tf.uint8, [1, None, None, 3])
    resized_image, _ = image_resizer_fn(image_placeholder)
    with self.test_session() as sess:
      return sess.run(resized_image, feed_dict={image_placeholder: image})

  def test_fixed_shape_resizer_nearest_neighbor_method(self):
    """Nearest-neighbor downsizing to 1x1 should keep the top-left value."""
    image_resizer_text_proto = """
      fixed_shape_resizer {
        height: 1
        width: 1
        resize_method: NEAREST_NEIGHBOR
      }
    """
    image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    image = np.expand_dims(image, axis=2)
    image = np.tile(image, (1, 1, 3))
    image = np.expand_dims(image, axis=0)
    resized_image = self._resized_image_given_text_proto(
        image, image_resizer_text_proto)
    vals = np.unique(resized_image).tolist()
    self.assertEqual(len(vals), 1)
    self.assertEqual(vals[0], 1)
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/input_reader_builder.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input reader builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wishes to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import
tensorflow
as
tf
from
object_detection.data_decoders
import
tf_example_decoder
from
object_detection.protos
import
input_reader_pb2
parallel_reader
=
tf
.
contrib
.
slim
.
parallel_reader
def build(input_reader_config):
  """Builds a tensor dictionary based on the InputReader config.

  Args:
    input_reader_config: A input_reader_pb2.InputReader object.

  Returns:
    A tensor dict based on the input_reader_config.

  Raises:
    ValueError: On invalid input reader proto.
    ValueError: If no input paths are specified.
  """
  # Guard clauses: validate the proto type and the configured reader kind
  # before touching any of its fields.
  if not isinstance(input_reader_config, input_reader_pb2.InputReader):
    raise ValueError('input_reader_config not of type '
                     'input_reader_pb2.InputReader.')
  if input_reader_config.WhichOneof('input_reader') != 'tf_record_input_reader':
    raise ValueError('Unsupported input_reader_config.')

  reader_config = input_reader_config.tf_record_input_reader
  if not reader_config.input_path:
    raise ValueError('At least one input path must be specified in '
                     '`input_reader_config`.')

  # num_epochs == 0 in the proto means "unlimited"; parallel_read wants None.
  epochs = input_reader_config.num_epochs or None
  _, string_tensor = parallel_reader.parallel_read(
      reader_config.input_path[:],  # Convert `RepeatedScalarContainer` to list.
      reader_class=tf.TFRecordReader,
      num_epochs=epochs,
      num_readers=input_reader_config.num_readers,
      shuffle=input_reader_config.shuffle,
      dtypes=[tf.string, tf.string],
      capacity=input_reader_config.queue_capacity,
      min_after_dequeue=input_reader_config.min_after_dequeue)

  label_map_proto_file = None
  if input_reader_config.HasField('label_map_path'):
    label_map_proto_file = input_reader_config.label_map_path
  decoder = tf_example_decoder.TfExampleDecoder(
      load_instance_masks=input_reader_config.load_instance_masks,
      instance_mask_type=input_reader_config.mask_type,
      label_map_proto_file=label_map_proto_file)
  return decoder.decode(string_tensor)
research/mlperf_object_detection/Mask_RCNN/object_detection/builders/input_reader_builder_test.py
deleted
100644 → 0
View file @
ad3526a9
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_reader_builder."""
import
os
import
numpy
as
np
import
tensorflow
as
tf
from
google.protobuf
import
text_format
from
tensorflow.core.example
import
example_pb2
from
tensorflow.core.example
import
feature_pb2
from
object_detection.builders
import
input_reader_builder
from
object_detection.core
import
standard_fields
as
fields
from
object_detection.protos
import
input_reader_pb2
class InputReaderBuilderTest(tf.test.TestCase):
  """Tests for input_reader_builder.build.

  Note: `assertEquals` (a deprecated unittest alias, removed in Python 3.12)
  has been replaced with `assertEqual`, and `assertTrue(x not in d)` with
  `assertNotIn` for clearer failure messages.
  """

  def create_tf_record(self):
    """Writes a single-example TFRecord to the temp dir and returns its path.

    The example holds a 4x5 JPEG image, one box spanning the full image with
    class label 2, and a full-coverage instance mask.
    """
    path = os.path.join(self.get_temp_dir(), 'tfrecord')
    writer = tf.python_io.TFRecordWriter(path)

    image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    flat_mask = (4 * 5) * [1.0]
    with self.test_session():
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
    example = example_pb2.Example(features=feature_pb2.Features(feature={
        'image/encoded': feature_pb2.Feature(
            bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])),
        'image/format': feature_pb2.Feature(
            bytes_list=feature_pb2.BytesList(value=['jpeg'.encode('utf-8')])),
        'image/height': feature_pb2.Feature(
            int64_list=feature_pb2.Int64List(value=[4])),
        'image/width': feature_pb2.Feature(
            int64_list=feature_pb2.Int64List(value=[5])),
        'image/object/bbox/xmin': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/xmax': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/bbox/ymin': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[0.0])),
        'image/object/bbox/ymax': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=[1.0])),
        'image/object/class/label': feature_pb2.Feature(
            int64_list=feature_pb2.Int64List(value=[2])),
        'image/object/mask': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(value=flat_mask)),
    }))
    writer.write(example.SerializeToString())
    writer.close()

    return path

  def test_build_tf_record_input_reader(self):
    """Decoded tensors should match the written record; no masks by default."""
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = input_reader_builder.build(input_reader_proto)

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    # Masks were not requested, so they must be absent from the output.
    self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
                     output_dict)
    self.assertEqual((4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertEqual([2],
                     output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0])

  def test_build_tf_record_input_reader_and_load_instance_masks(self):
    """With load_instance_masks the decoded dict should include the masks."""
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = input_reader_builder.build(input_reader_proto)

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertEqual((4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertEqual([2],
                     output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0])
    self.assertAllEqual(
        (1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)

  def test_raises_error_with_no_input_paths(self):
    """A tf_record reader config without input paths must raise ValueError."""
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
    """
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    with self.assertRaises(ValueError):
      input_reader_builder.build(input_reader_proto)
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
Prev
1
2
3
4
5
6
…
9
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment