ModelZoo / ResNet50_tensorflow / Commits

Commit 4ddc9f2d
authored Jul 18, 2017 by Derek Chow
parent 213125e3

Change exporter to allow dynamic batch inference.
Showing 3 changed files with 160 additions and 73 deletions:

  object_detection/export_inference_graph.py    +7    -6
  object_detection/exporter.py                  +33   -16
  object_detection/exporter_test.py             +120  -51
object_detection/export_inference_graph.py

@@ -22,11 +22,13 @@ SavedModel (https://tensorflow.github.io/serving/serving_basic.html).

 The inference graph contains one of three input nodes depending on the user
 specified option.
-  * `image_tensor`: Accepts a uint8 4-D tensor of shape [1, None, None, 3]
-  * `encoded_image_string_tensor`: Accepts a scalar string tensor of encoded PNG
-    or JPEG image.
-  * `tf_example`: Accepts a serialized TFExample proto. The batch size in this
-    case is always 1.
+  * `image_tensor`: Accepts a uint8 4-D tensor of shape [None, None, None, 3]
+  * `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None]
+    containing encoded PNG or JPEG images. Image resolutions are expected to be
+    the same if more than 1 image is provided.
+  * `tf_example`: Accepts a 1-D string tensor of shape [None] containing
+    serialized TFExample protos. Image resolutions are expected to be the same
+    if more than 1 image is provided.

 and the following output nodes returned by the model.postprocess(..):
   * `num_detections`: Outputs float32 tensors of the form [batch]
...
@@ -43,7 +45,6 @@ and the following output nodes returned by the model.postprocess(..):
     tensors returned by the model.

 Notes:
-* Currently `batch` is always 1, but we will support `batch` > 1 in the future.
 * This tool uses `use_moving_averages` from eval_config to decide which
   weights to freeze.
...
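With this change every input node carries a leading batch dimension, so a graph exported with input_type='image_tensor' can be fed several images per session run. The sketch below only illustrates that usage and is not part of this commit: the TF 1.x loading boilerplate and the path 'output/frozen_inference_graph.pb' are assumptions, while the tensor names ('image_tensor:0', 'detection_boxes:0', 'num_detections:0') are the ones exercised in exporter_test.py.

# Illustration only (TF 1.x): batched inference against an exported frozen graph.
import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('output/frozen_inference_graph.pb', 'rb') as fid:  # assumed path
  graph_def.ParseFromString(fid.read())

with tf.Graph().as_default() as graph:
  tf.import_graph_def(graph_def, name='')
  image_tensor = graph.get_tensor_by_name('image_tensor:0')
  boxes = graph.get_tensor_by_name('detection_boxes:0')
  num_detections = graph.get_tensor_by_name('num_detections:0')
  with tf.Session(graph=graph) as sess:
    # A batch of two images; before this commit the placeholder shape was
    # fixed to [1, None, None, 3], so only one image could be fed at a time.
    batch = np.zeros((2, 480, 640, 3), dtype=np.uint8)
    boxes_np, num_detections_np = sess.run(
        [boxes, num_detections], feed_dict={image_tensor: batch})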
object_detection/exporter.py

@@ -111,29 +111,46 @@ def freeze_graph_with_def_protos(

   return output_graph_def


-# TODO: Support batch tf example inputs.
-def _tf_example_input_placeholder():
-  tf_example_placeholder = tf.placeholder(
-      tf.string, shape=[], name='tf_example')
-  tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
-      tf_example_placeholder)
-  image = tensor_dict[fields.InputDataFields.image]
-  return tf.expand_dims(image, axis=0)
-
-
 def _image_tensor_input_placeholder():
+  """Returns input node that accepts a batch of uint8 images."""
   return tf.placeholder(dtype=tf.uint8,
-                        shape=(1, None, None, 3),
+                        shape=(None, None, None, 3),
                         name='image_tensor')


+def _tf_example_input_placeholder():
+  """Returns input node that accepts a batch of strings with tf examples."""
+  batch_tf_example_placeholder = tf.placeholder(
+      tf.string, shape=[None], name='tf_example')
+  def decode(tf_example_string_tensor):
+    tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
+        tf_example_string_tensor)
+    image_tensor = tensor_dict[fields.InputDataFields.image]
+    return image_tensor
+  return tf.map_fn(decode,
+                   elems=batch_tf_example_placeholder,
+                   dtype=tf.uint8,
+                   parallel_iterations=32,
+                   back_prop=False)
+
+
 def _encoded_image_string_tensor_input_placeholder():
-  image_str = tf.placeholder(dtype=tf.string,
-                             shape=[],
-                             name='encoded_image_string_tensor')
-  image_tensor = tf.image.decode_image(image_str, channels=3)
-  image_tensor.set_shape((None, None, 3))
-  return tf.expand_dims(image_tensor, axis=0)
+  """Returns input node that accepts a batch of PNG or JPEG strings."""
+  batch_image_str_placeholder = tf.placeholder(
+      dtype=tf.string,
+      shape=[None],
+      name='encoded_image_string_tensor')
+  def decode(encoded_image_string_tensor):
+    image_tensor = tf.image.decode_image(encoded_image_string_tensor,
+                                         channels=3)
+    image_tensor.set_shape((None, None, 3))
+    return image_tensor
+  return tf.map_fn(decode,
+                   elems=batch_image_str_placeholder,
+                   dtype=tf.uint8,
+                   parallel_iterations=32,
+                   back_prop=False)


 input_placeholder_fn_map = {
...
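Both new placeholders follow the same pattern: a 1-D string placeholder of shape [None] whose elements are decoded one at a time with tf.map_fn. Below is a minimal, self-contained sketch of that pattern under TF 1.x; the dummy 4x4 JPEG and the session boilerplate are illustrative only, while the decode_image / set_shape / map_fn calls mirror _encoded_image_string_tensor_input_placeholder above.

# Minimal sketch of the tf.map_fn batching pattern used above (TF 1.x).
import numpy as np
import tensorflow as tf

batch_image_str = tf.placeholder(tf.string, shape=[None])

def decode(encoded_image_string_tensor):
  image = tf.image.decode_image(encoded_image_string_tensor, channels=3)
  image.set_shape((None, None, 3))  # map_fn needs a known rank per element
  return image

# back_prop=False because the exported graph is inference-only.
images = tf.map_fn(decode, elems=batch_image_str, dtype=tf.uint8,
                   parallel_iterations=32, back_prop=False)

with tf.Session() as sess:
  # Two encoded JPEGs of the same resolution; mismatched sizes would make
  # the underlying TensorArray raise "TensorArray has inconsistent shapes."
  jpeg = sess.run(tf.image.encode_jpeg(tf.zeros([4, 4, 3], tf.uint8)))
  out = sess.run(images, feed_dict={batch_image_str: np.array([jpeg, jpeg])})
  print(out.shape)  # (2, 4, 4, 3)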
object_detection/exporter_test.py

@@ -43,15 +43,19 @@ class FakeModel(model.DetectionModel):

   def postprocess(self, prediction_dict):
     with tf.control_dependencies(prediction_dict.values()):
       postprocessed_tensors = {
-          'detection_boxes': tf.constant([[0.0, 0.0, 0.5, 0.5],
-                                          [0.5, 0.5, 0.8, 0.8]], tf.float32),
-          'detection_scores': tf.constant([[0.7, 0.6]], tf.float32),
-          'detection_classes': tf.constant([[0, 1]], tf.float32),
-          'num_detections': tf.constant([2], tf.float32)
+          'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
+                                           [0.5, 0.5, 0.8, 0.8]],
+                                          [[0.5, 0.5, 1.0, 1.0],
+                                           [0.0, 0.0, 0.0, 0.0]]], tf.float32),
+          'detection_scores': tf.constant([[0.7, 0.6],
+                                           [0.9, 0.0]], tf.float32),
+          'detection_classes': tf.constant([[0, 1],
+                                            [1, 0]], tf.float32),
+          'num_detections': tf.constant([2, 1], tf.float32)
       }
       if self._add_detection_masks:
         postprocessed_tensors['detection_masks'] = tf.constant(
-            np.arange(32).reshape([2, 4, 4]), tf.float32)
+            np.arange(64).reshape([2, 2, 4, 4]), tf.float32)
     return postprocessed_tensors

   def restore_map(self, checkpoint_path, from_detection_checkpoint):
...
@@ -69,7 +73,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
     with g.as_default():
       mock_model = FakeModel()
       preprocessed_inputs = mock_model.preprocess(
-          tf.ones([1, 3, 4, 3], tf.float32))
+          tf.placeholder(tf.float32, shape=[None, None, None, 3]))
       predictions = mock_model.predict(preprocessed_inputs)
       mock_model.postprocess(predictions)
       if use_moving_averages:
...
@@ -250,15 +254,19 @@ class ExportInferenceGraphTest(tf.test.TestCase):
       classes = inference_graph.get_tensor_by_name('detection_classes:0')
       masks = inference_graph.get_tensor_by_name('detection_masks:0')
       num_detections = inference_graph.get_tensor_by_name('num_detections:0')
-      (boxes, scores, classes, masks, num_detections) = sess.run(
+      (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
           [boxes, scores, classes, masks, num_detections],
-          feed_dict={image_tensor: np.ones((1, 4, 4, 3)).astype(np.uint8)})
-      self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
-                                  [0.5, 0.5, 0.8, 0.8]])
-      self.assertAllClose(scores, [[0.7, 0.6]])
-      self.assertAllClose(classes, [[1, 2]])
-      self.assertAllClose(masks, np.arange(32).reshape([2, 4, 4]))
-      self.assertAllClose(num_detections, [2])
+          feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)})
+      self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
+                                      [0.5, 0.5, 0.8, 0.8]],
+                                     [[0.5, 0.5, 1.0, 1.0],
+                                      [0.0, 0.0, 0.0, 0.0]]])
+      self.assertAllClose(scores_np, [[0.7, 0.6],
+                                      [0.9, 0.0]])
+      self.assertAllClose(classes_np, [[1, 2],
+                                       [2, 1]])
+      self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
+      self.assertAllClose(num_detections_np, [2, 1])

   def _create_encoded_image_string(self, image_array_np, encoding_format):
     od_graph = tf.Graph()
...
@@ -305,16 +313,60 @@ class ExportInferenceGraphTest(tf.test.TestCase):
       masks = inference_graph.get_tensor_by_name('detection_masks:0')
       num_detections = inference_graph.get_tensor_by_name('num_detections:0')
       for image_str in [jpg_image_str, png_image_str]:
+        image_str_batch_np = np.hstack([image_str] * 2)
         (boxes_np, scores_np, classes_np, masks_np,
          num_detections_np) = sess.run(
              [boxes, scores, classes, masks, num_detections],
-             feed_dict={image_str_tensor: image_str})
-        self.assertAllClose(boxes_np, [[0.0, 0.0, 0.5, 0.5],
-                                       [0.5, 0.5, 0.8, 0.8]])
-        self.assertAllClose(scores_np, [[0.7, 0.6]])
-        self.assertAllClose(classes_np, [[1, 2]])
-        self.assertAllClose(masks_np, np.arange(32).reshape([2, 4, 4]))
-        self.assertAllClose(num_detections_np, [2])
+             feed_dict={image_str_tensor: image_str_batch_np})
+        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
+                                        [0.5, 0.5, 0.8, 0.8]],
+                                       [[0.5, 0.5, 1.0, 1.0],
+                                        [0.0, 0.0, 0.0, 0.0]]])
+        self.assertAllClose(scores_np, [[0.7, 0.6],
+                                        [0.9, 0.0]])
+        self.assertAllClose(classes_np, [[1, 2],
+                                         [2, 1]])
+        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
+        self.assertAllClose(num_detections_np, [2, 1])
+
+  def test_raise_runtime_error_on_images_with_different_sizes(self):
+    tmp_dir = self.get_temp_dir()
+    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
+    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
+                                          use_moving_averages=True)
+    output_directory = os.path.join(tmp_dir, 'output')
+    inference_graph_path = os.path.join(output_directory,
+                                        'frozen_inference_graph.pb')
+    with mock.patch.object(
+        model_builder, 'build', autospec=True) as mock_builder:
+      mock_builder.return_value = FakeModel(add_detection_masks=True)
+      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
+      pipeline_config.eval_config.use_moving_averages = False
+      exporter.export_inference_graph(
+          input_type='encoded_image_string_tensor',
+          pipeline_config=pipeline_config,
+          trained_checkpoint_prefix=trained_checkpoint_prefix,
+          output_directory=output_directory)
+    inference_graph = self._load_inference_graph(inference_graph_path)
+    large_image = self._create_encoded_image_string(
+        np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
+    small_image = self._create_encoded_image_string(
+        np.ones((2, 2, 3)).astype(np.uint8), 'jpg')
+    image_str_batch_np = np.hstack([large_image, small_image])
+    with self.test_session(graph=inference_graph) as sess:
+      image_str_tensor = inference_graph.get_tensor_by_name(
+          'encoded_image_string_tensor:0')
+      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
+      scores = inference_graph.get_tensor_by_name('detection_scores:0')
+      classes = inference_graph.get_tensor_by_name('detection_classes:0')
+      masks = inference_graph.get_tensor_by_name('detection_masks:0')
+      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
+      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+                                   '^TensorArray has inconsistent shapes.'):
+        sess.run([boxes, scores, classes, masks, num_detections],
+                 feed_dict={image_str_tensor: image_str_batch_np})

   def test_export_and_run_inference_with_tf_example(self):
     tmp_dir = self.get_temp_dir()
...
@@ -336,6 +388,8 @@ class ExportInferenceGraphTest(tf.test.TestCase):
           output_directory=output_directory)
     inference_graph = self._load_inference_graph(inference_graph_path)
+    tf_example_np = np.expand_dims(self._create_tf_example(
+        np.ones((4, 4, 3)).astype(np.uint8)), axis=0)
     with self.test_session(graph=inference_graph) as sess:
       tf_example = inference_graph.get_tensor_by_name('tf_example:0')
       boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
...
@@ -343,16 +397,19 @@ class ExportInferenceGraphTest(tf.test.TestCase):
       classes = inference_graph.get_tensor_by_name('detection_classes:0')
       masks = inference_graph.get_tensor_by_name('detection_masks:0')
       num_detections = inference_graph.get_tensor_by_name('num_detections:0')
-      (boxes, scores, classes, masks, num_detections) = sess.run(
+      (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
           [boxes, scores, classes, masks, num_detections],
-          feed_dict={tf_example: self._create_tf_example(
-              np.ones((4, 4, 3)).astype(np.uint8))})
-      self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
-                                  [0.5, 0.5, 0.8, 0.8]])
-      self.assertAllClose(scores, [[0.7, 0.6]])
-      self.assertAllClose(classes, [[1, 2]])
-      self.assertAllClose(masks, np.arange(32).reshape([2, 4, 4]))
-      self.assertAllClose(num_detections, [2])
+          feed_dict={tf_example: tf_example_np})
+      self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
+                                      [0.5, 0.5, 0.8, 0.8]],
+                                     [[0.5, 0.5, 1.0, 1.0],
+                                      [0.0, 0.0, 0.0, 0.0]]])
+      self.assertAllClose(scores_np, [[0.7, 0.6],
+                                      [0.9, 0.0]])
+      self.assertAllClose(classes_np, [[1, 2],
+                                       [2, 1]])
+      self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
+      self.assertAllClose(num_detections_np, [2, 1])

   def test_export_saved_model_and_run_inference(self):
     tmp_dir = self.get_temp_dir()
...
@@ -373,6 +430,8 @@ class ExportInferenceGraphTest(tf.test.TestCase):
           trained_checkpoint_prefix=trained_checkpoint_prefix,
           output_directory=output_directory)
+    tf_example_np = np.hstack([self._create_tf_example(
+        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
     with tf.Graph().as_default() as od_graph:
       with self.test_session(graph=od_graph) as sess:
         tf.saved_model.loader.load(
...
@@ -383,16 +442,20 @@ class ExportInferenceGraphTest(tf.test.TestCase):
         classes = od_graph.get_tensor_by_name('detection_classes:0')
         masks = od_graph.get_tensor_by_name('detection_masks:0')
         num_detections = od_graph.get_tensor_by_name('num_detections:0')
-        (boxes, scores, classes, masks, num_detections) = sess.run(
+        (boxes_np, scores_np, classes_np, masks_np,
+         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
-            feed_dict={tf_example: self._create_tf_example(
-                np.ones((4, 4, 3)).astype(np.uint8))})
-        self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
-                                    [0.5, 0.5, 0.8, 0.8]])
-        self.assertAllClose(scores, [[0.7, 0.6]])
-        self.assertAllClose(classes, [[1, 2]])
-        self.assertAllClose(masks, np.arange(32).reshape([2, 4, 4]))
-        self.assertAllClose(num_detections, [2])
+            feed_dict={tf_example: tf_example_np})
+        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
+                                        [0.5, 0.5, 0.8, 0.8]],
+                                       [[0.5, 0.5, 1.0, 1.0],
+                                        [0.0, 0.0, 0.0, 0.0]]])
+        self.assertAllClose(scores_np, [[0.7, 0.6],
+                                        [0.9, 0.0]])
+        self.assertAllClose(classes_np, [[1, 2],
+                                         [2, 1]])
+        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
+        self.assertAllClose(num_detections_np, [2, 1])

   def test_export_checkpoint_and_run_inference(self):
     tmp_dir = self.get_temp_dir()
...
@@ -414,6 +477,8 @@ class ExportInferenceGraphTest(tf.test.TestCase):
           trained_checkpoint_prefix=trained_checkpoint_prefix,
           output_directory=output_directory)
+    tf_example_np = np.hstack([self._create_tf_example(
+        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
     with tf.Graph().as_default() as od_graph:
       with self.test_session(graph=od_graph) as sess:
         new_saver = tf.train.import_meta_graph(meta_graph_path)
...
@@ -425,16 +490,20 @@ class ExportInferenceGraphTest(tf.test.TestCase):
        classes = od_graph.get_tensor_by_name('detection_classes:0')
        masks = od_graph.get_tensor_by_name('detection_masks:0')
        num_detections = od_graph.get_tensor_by_name('num_detections:0')
-        (boxes, scores, classes, masks, num_detections) = sess.run(
+        (boxes_np, scores_np, classes_np, masks_np,
+         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
-            feed_dict={tf_example: self._create_tf_example(
-                np.ones((4, 4, 3)).astype(np.uint8))})
-        self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
-                                    [0.5, 0.5, 0.8, 0.8]])
-        self.assertAllClose(scores, [[0.7, 0.6]])
-        self.assertAllClose(classes, [[1, 2]])
-        self.assertAllClose(masks, np.arange(32).reshape([2, 4, 4]))
-        self.assertAllClose(num_detections, [2])
+            feed_dict={tf_example: tf_example_np})
+        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
+                                        [0.5, 0.5, 0.8, 0.8]],
+                                       [[0.5, 0.5, 1.0, 1.0],
+                                        [0.0, 0.0, 0.0, 0.0]]])
+        self.assertAllClose(scores_np, [[0.7, 0.6],
+                                        [0.9, 0.0]])
+        self.assertAllClose(classes_np, [[1, 2],
+                                         [2, 1]])
+        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
+        self.assertAllClose(num_detections_np, [2, 1])

 if __name__ == '__main__':
...
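The updated tests build batched feeds on the host side simply by stacking serialized strings, e.g. np.hstack([image_str] * 2) for encoded images and np.hstack([self._create_tf_example(...)] * 2) for tf.Examples. As a hedged sketch of what such a batch might look like outside the test harness (the helper below is not the _create_tf_example used in the tests; the 'image/encoded' and 'image/format' feature keys are assumed to be what object_detection's TfExampleDecoder reads, and PIL is used only to produce JPEG bytes):

# Illustrative only: build a batch of serialized tf.Example strings suitable
# for the exported graph's 'tf_example:0' input. Feature keys are assumed.
import io
import numpy as np
import tensorflow as tf
from PIL import Image

def encode_jpeg(image_np):
  buf = io.BytesIO()
  Image.fromarray(image_np).save(buf, format='JPEG')
  return buf.getvalue()

def make_serialized_example(image_np):
  encoded = encode_jpeg(image_np)
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[encoded])),
      'image/format': tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[b'jpg'])),
  }))
  return example.SerializeToString()

image = np.ones((4, 4, 3), dtype=np.uint8)
# Same trick as the tests: a 1-D array with one serialized string per image.
tf_example_batch = np.hstack([make_serialized_example(image)] * 2)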