Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
f906646c
Commit
f906646c
authored
Jun 29, 2017
by
Duc Nguyen
Committed by
GitHub
Jun 29, 2017
Browse files
Merge branch 'master' into patch-2
parents
2f3666ed
a6df5573
Changes
59
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
448 additions
and
149 deletions
+448
-149
object_detection/core/data_decoder.py
object_detection/core/data_decoder.py
+1
-2
object_detection/core/post_processing.py
object_detection/core/post_processing.py
+1
-1
object_detection/core/prefetcher.py
object_detection/core/prefetcher.py
+1
-1
object_detection/core/preprocessor.py
object_detection/core/preprocessor.py
+0
-1
object_detection/core/preprocessor_test.py
object_detection/core/preprocessor_test.py
+6
-1
object_detection/create_pascal_tf_record.py
object_detection/create_pascal_tf_record.py
+9
-7
object_detection/create_pet_tf_record.py
object_detection/create_pet_tf_record.py
+9
-7
object_detection/data_decoders/tf_example_decoder.py
object_detection/data_decoders/tf_example_decoder.py
+1
-1
object_detection/data_decoders/tf_example_decoder_test.py
object_detection/data_decoders/tf_example_decoder_test.py
+9
-9
object_detection/evaluator.py
object_detection/evaluator.py
+1
-1
object_detection/export_inference_graph.py
object_detection/export_inference_graph.py
+19
-8
object_detection/exporter.py
object_detection/exporter.py
+146
-37
object_detection/exporter_test.py
object_detection/exporter_test.py
+191
-19
object_detection/g3doc/preparing_inputs.md
object_detection/g3doc/preparing_inputs.md
+14
-14
object_detection/g3doc/running_locally.md
object_detection/g3doc/running_locally.md
+1
-1
object_detection/g3doc/running_on_cloud.md
object_detection/g3doc/running_on_cloud.md
+2
-2
object_detection/g3doc/running_pets.md
object_detection/g3doc/running_pets.md
+31
-31
object_detection/samples/configs/faster_rcnn_inception_resnet_v2_atrous_pets.config
...onfigs/faster_rcnn_inception_resnet_v2_atrous_pets.config
+1
-1
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
...tection/samples/configs/faster_rcnn_resnet101_pets.config
+1
-1
object_detection/samples/configs/faster_rcnn_resnet101_voc07.config
...ection/samples/configs/faster_rcnn_resnet101_voc07.config
+4
-4
No files found.
object_detection/core/data_decoder.py
View file @
f906646c
...
@@ -26,9 +26,8 @@ class DataDecoder(object):
...
@@ -26,9 +26,8 @@ class DataDecoder(object):
"""Interface for data decoders."""
"""Interface for data decoders."""
__metaclass__
=
ABCMeta
__metaclass__
=
ABCMeta
# TODO: snake_case this method.
@
abstractmethod
@
abstractmethod
def
D
ecode
(
self
,
data
):
def
d
ecode
(
self
,
data
):
"""Return a single image and associated labels.
"""Return a single image and associated labels.
Args:
Args:
...
...
object_detection/core/post_processing.py
View file @
f906646c
...
@@ -131,7 +131,7 @@ def multiclass_non_max_suppression(boxes,
...
@@ -131,7 +131,7 @@ def multiclass_non_max_suppression(boxes,
boxlist_and_class_scores
.
add_field
(
fields
.
BoxListFields
.
masks
,
boxlist_and_class_scores
.
add_field
(
fields
.
BoxListFields
.
masks
,
per_class_masks
)
per_class_masks
)
if
additional_fields
is
not
None
:
if
additional_fields
is
not
None
:
for
key
,
tensor
in
additional_fields
.
iter
items
():
for
key
,
tensor
in
additional_fields
.
items
():
boxlist_and_class_scores
.
add_field
(
key
,
tensor
)
boxlist_and_class_scores
.
add_field
(
key
,
tensor
)
boxlist_filtered
=
box_list_ops
.
filter_greater_than
(
boxlist_filtered
=
box_list_ops
.
filter_greater_than
(
boxlist_and_class_scores
,
score_thresh
)
boxlist_and_class_scores
,
score_thresh
)
...
...
object_detection/core/prefetcher.py
View file @
f906646c
...
@@ -45,7 +45,7 @@ def prefetch(tensor_dict, capacity):
...
@@ -45,7 +45,7 @@ def prefetch(tensor_dict, capacity):
Returns:
Returns:
a FIFO prefetcher queue
a FIFO prefetcher queue
"""
"""
names
=
tensor_dict
.
keys
()
names
=
list
(
tensor_dict
.
keys
()
)
dtypes
=
[
t
.
dtype
for
t
in
tensor_dict
.
values
()]
dtypes
=
[
t
.
dtype
for
t
in
tensor_dict
.
values
()]
shapes
=
[
t
.
get_shape
()
for
t
in
tensor_dict
.
values
()]
shapes
=
[
t
.
get_shape
()
for
t
in
tensor_dict
.
values
()]
prefetch_queue
=
tf
.
PaddingFIFOQueue
(
capacity
,
dtypes
=
dtypes
,
prefetch_queue
=
tf
.
PaddingFIFOQueue
(
capacity
,
dtypes
=
dtypes
,
...
...
object_detection/core/preprocessor.py
View file @
f906646c
...
@@ -341,7 +341,6 @@ def random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None):
...
@@ -341,7 +341,6 @@ def random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None):
Returns:
Returns:
image: image which is the same shape as input image.
image: image which is the same shape as input image.
boxes: boxes which is the same shape as input boxes.
"""
"""
with
tf
.
name_scope
(
'RandomPixelValueScale'
,
values
=
[
image
]):
with
tf
.
name_scope
(
'RandomPixelValueScale'
,
values
=
[
image
]):
color_coef
=
tf
.
random_uniform
(
color_coef
=
tf
.
random_uniform
(
...
...
object_detection/core/preprocessor_test.py
View file @
f906646c
...
@@ -15,14 +15,19 @@
...
@@ -15,14 +15,19 @@
"""Tests for object_detection.core.preprocessor."""
"""Tests for object_detection.core.preprocessor."""
import
mock
import
numpy
as
np
import
numpy
as
np
import
six
import
tensorflow
as
tf
import
tensorflow
as
tf
from
object_detection.core
import
preprocessor
from
object_detection.core
import
preprocessor
from
object_detection.core
import
standard_fields
as
fields
from
object_detection.core
import
standard_fields
as
fields
if
six
.
PY2
:
import
mock
# pylint: disable=g-import-not-at-top
else
:
from
unittest
import
mock
# pylint: disable=g-import-not-at-top
class
PreprocessorTest
(
tf
.
test
.
TestCase
):
class
PreprocessorTest
(
tf
.
test
.
TestCase
):
...
...
object_detection/create_pascal_tf_record.py
View file @
f906646c
...
@@ -83,7 +83,7 @@ def dict_to_tf_example(data,
...
@@ -83,7 +83,7 @@ def dict_to_tf_example(data,
"""
"""
img_path
=
os
.
path
.
join
(
data
[
'folder'
],
image_subdirectory
,
data
[
'filename'
])
img_path
=
os
.
path
.
join
(
data
[
'folder'
],
image_subdirectory
,
data
[
'filename'
])
full_path
=
os
.
path
.
join
(
dataset_directory
,
img_path
)
full_path
=
os
.
path
.
join
(
dataset_directory
,
img_path
)
with
tf
.
gfile
.
GFile
(
full_path
)
as
fid
:
with
tf
.
gfile
.
GFile
(
full_path
,
'rb'
)
as
fid
:
encoded_jpg
=
fid
.
read
()
encoded_jpg
=
fid
.
read
()
encoded_jpg_io
=
io
.
BytesIO
(
encoded_jpg
)
encoded_jpg_io
=
io
.
BytesIO
(
encoded_jpg
)
image
=
PIL
.
Image
.
open
(
encoded_jpg_io
)
image
=
PIL
.
Image
.
open
(
encoded_jpg_io
)
...
@@ -114,19 +114,21 @@ def dict_to_tf_example(data,
...
@@ -114,19 +114,21 @@ def dict_to_tf_example(data,
ymin
.
append
(
float
(
obj
[
'bndbox'
][
'ymin'
])
/
height
)
ymin
.
append
(
float
(
obj
[
'bndbox'
][
'ymin'
])
/
height
)
xmax
.
append
(
float
(
obj
[
'bndbox'
][
'xmax'
])
/
width
)
xmax
.
append
(
float
(
obj
[
'bndbox'
][
'xmax'
])
/
width
)
ymax
.
append
(
float
(
obj
[
'bndbox'
][
'ymax'
])
/
height
)
ymax
.
append
(
float
(
obj
[
'bndbox'
][
'ymax'
])
/
height
)
classes_text
.
append
(
obj
[
'name'
])
classes_text
.
append
(
obj
[
'name'
]
.
encode
(
'utf8'
)
)
classes
.
append
(
label_map_dict
[
obj
[
'name'
]])
classes
.
append
(
label_map_dict
[
obj
[
'name'
]])
truncated
.
append
(
int
(
obj
[
'truncated'
]))
truncated
.
append
(
int
(
obj
[
'truncated'
]))
poses
.
append
(
obj
[
'pose'
])
poses
.
append
(
obj
[
'pose'
]
.
encode
(
'utf8'
)
)
example
=
tf
.
train
.
Example
(
features
=
tf
.
train
.
Features
(
feature
=
{
example
=
tf
.
train
.
Example
(
features
=
tf
.
train
.
Features
(
feature
=
{
'image/height'
:
dataset_util
.
int64_feature
(
height
),
'image/height'
:
dataset_util
.
int64_feature
(
height
),
'image/width'
:
dataset_util
.
int64_feature
(
width
),
'image/width'
:
dataset_util
.
int64_feature
(
width
),
'image/filename'
:
dataset_util
.
bytes_feature
(
data
[
'filename'
]),
'image/filename'
:
dataset_util
.
bytes_feature
(
'image/source_id'
:
dataset_util
.
bytes_feature
(
data
[
'filename'
]),
data
[
'filename'
].
encode
(
'utf8'
)),
'image/key/sha256'
:
dataset_util
.
bytes_feature
(
key
),
'image/source_id'
:
dataset_util
.
bytes_feature
(
data
[
'filename'
].
encode
(
'utf8'
)),
'image/key/sha256'
:
dataset_util
.
bytes_feature
(
key
.
encode
(
'utf8'
)),
'image/encoded'
:
dataset_util
.
bytes_feature
(
encoded_jpg
),
'image/encoded'
:
dataset_util
.
bytes_feature
(
encoded_jpg
),
'image/format'
:
dataset_util
.
bytes_feature
(
'jpeg'
),
'image/format'
:
dataset_util
.
bytes_feature
(
'jpeg'
.
encode
(
'utf8'
)
),
'image/object/bbox/xmin'
:
dataset_util
.
float_list_feature
(
xmin
),
'image/object/bbox/xmin'
:
dataset_util
.
float_list_feature
(
xmin
),
'image/object/bbox/xmax'
:
dataset_util
.
float_list_feature
(
xmax
),
'image/object/bbox/xmax'
:
dataset_util
.
float_list_feature
(
xmax
),
'image/object/bbox/ymin'
:
dataset_util
.
float_list_feature
(
ymin
),
'image/object/bbox/ymin'
:
dataset_util
.
float_list_feature
(
ymin
),
...
...
object_detection/create_pet_tf_record.py
View file @
f906646c
...
@@ -86,7 +86,7 @@ def dict_to_tf_example(data,
...
@@ -86,7 +86,7 @@ def dict_to_tf_example(data,
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
"""
img_path
=
os
.
path
.
join
(
image_subdirectory
,
data
[
'filename'
])
img_path
=
os
.
path
.
join
(
image_subdirectory
,
data
[
'filename'
])
with
tf
.
gfile
.
GFile
(
img_path
)
as
fid
:
with
tf
.
gfile
.
GFile
(
img_path
,
'rb'
)
as
fid
:
encoded_jpg
=
fid
.
read
()
encoded_jpg
=
fid
.
read
()
encoded_jpg_io
=
io
.
BytesIO
(
encoded_jpg
)
encoded_jpg_io
=
io
.
BytesIO
(
encoded_jpg
)
image
=
PIL
.
Image
.
open
(
encoded_jpg_io
)
image
=
PIL
.
Image
.
open
(
encoded_jpg_io
)
...
@@ -118,19 +118,21 @@ def dict_to_tf_example(data,
...
@@ -118,19 +118,21 @@ def dict_to_tf_example(data,
xmax
.
append
(
float
(
obj
[
'bndbox'
][
'xmax'
])
/
width
)
xmax
.
append
(
float
(
obj
[
'bndbox'
][
'xmax'
])
/
width
)
ymax
.
append
(
float
(
obj
[
'bndbox'
][
'ymax'
])
/
height
)
ymax
.
append
(
float
(
obj
[
'bndbox'
][
'ymax'
])
/
height
)
class_name
=
get_class_name_from_filename
(
data
[
'filename'
])
class_name
=
get_class_name_from_filename
(
data
[
'filename'
])
classes_text
.
append
(
class_name
)
classes_text
.
append
(
class_name
.
encode
(
'utf8'
)
)
classes
.
append
(
label_map_dict
[
class_name
])
classes
.
append
(
label_map_dict
[
class_name
])
truncated
.
append
(
int
(
obj
[
'truncated'
]))
truncated
.
append
(
int
(
obj
[
'truncated'
]))
poses
.
append
(
obj
[
'pose'
])
poses
.
append
(
obj
[
'pose'
]
.
encode
(
'utf8'
)
)
example
=
tf
.
train
.
Example
(
features
=
tf
.
train
.
Features
(
feature
=
{
example
=
tf
.
train
.
Example
(
features
=
tf
.
train
.
Features
(
feature
=
{
'image/height'
:
dataset_util
.
int64_feature
(
height
),
'image/height'
:
dataset_util
.
int64_feature
(
height
),
'image/width'
:
dataset_util
.
int64_feature
(
width
),
'image/width'
:
dataset_util
.
int64_feature
(
width
),
'image/filename'
:
dataset_util
.
bytes_feature
(
data
[
'filename'
]),
'image/filename'
:
dataset_util
.
bytes_feature
(
'image/source_id'
:
dataset_util
.
bytes_feature
(
data
[
'filename'
]),
data
[
'filename'
].
encode
(
'utf8'
)),
'image/key/sha256'
:
dataset_util
.
bytes_feature
(
key
),
'image/source_id'
:
dataset_util
.
bytes_feature
(
data
[
'filename'
].
encode
(
'utf8'
)),
'image/key/sha256'
:
dataset_util
.
bytes_feature
(
key
.
encode
(
'utf8'
)),
'image/encoded'
:
dataset_util
.
bytes_feature
(
encoded_jpg
),
'image/encoded'
:
dataset_util
.
bytes_feature
(
encoded_jpg
),
'image/format'
:
dataset_util
.
bytes_feature
(
'jpeg'
),
'image/format'
:
dataset_util
.
bytes_feature
(
'jpeg'
.
encode
(
'utf8'
)
),
'image/object/bbox/xmin'
:
dataset_util
.
float_list_feature
(
xmin
),
'image/object/bbox/xmin'
:
dataset_util
.
float_list_feature
(
xmin
),
'image/object/bbox/xmax'
:
dataset_util
.
float_list_feature
(
xmax
),
'image/object/bbox/xmax'
:
dataset_util
.
float_list_feature
(
xmax
),
'image/object/bbox/ymin'
:
dataset_util
.
float_list_feature
(
ymin
),
'image/object/bbox/ymin'
:
dataset_util
.
float_list_feature
(
ymin
),
...
...
object_detection/data_decoders/tf_example_decoder.py
View file @
f906646c
...
@@ -82,7 +82,7 @@ class TfExampleDecoder(data_decoder.DataDecoder):
...
@@ -82,7 +82,7 @@ class TfExampleDecoder(data_decoder.DataDecoder):
slim_example_decoder
.
Tensor
(
'image/segmentation/object/class'
)),
slim_example_decoder
.
Tensor
(
'image/segmentation/object/class'
)),
}
}
def
D
ecode
(
self
,
tf_example_string_tensor
):
def
d
ecode
(
self
,
tf_example_string_tensor
):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
Args:
...
...
object_detection/data_decoders/tf_example_decoder_test.py
View file @
f906646c
...
@@ -64,7 +64,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -64,7 +64,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
image
].
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
image
].
get_shape
().
as_list
()),
[
None
,
None
,
3
])
get_shape
().
as_list
()),
[
None
,
None
,
3
])
...
@@ -84,7 +84,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -84,7 +84,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
with
self
.
test_session
()
as
sess
:
with
self
.
test_session
()
as
sess
:
tensor_dict
=
sess
.
run
(
tensor_dict
)
tensor_dict
=
sess
.
run
(
tensor_dict
)
...
@@ -103,7 +103,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -103,7 +103,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
image
].
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
image
].
get_shape
().
as_list
()),
[
None
,
None
,
3
])
get_shape
().
as_list
()),
[
None
,
None
,
3
])
...
@@ -130,7 +130,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -130,7 +130,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_boxes
].
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_boxes
].
get_shape
().
as_list
()),
[
None
,
4
])
get_shape
().
as_list
()),
[
None
,
4
])
...
@@ -153,7 +153,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -153,7 +153,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
tensor_dict
[
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_classes
].
get_shape
().
as_list
()),
fields
.
InputDataFields
.
groundtruth_classes
].
get_shape
().
as_list
()),
...
@@ -176,7 +176,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -176,7 +176,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_area
].
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_area
].
get_shape
().
as_list
()),
[
None
])
get_shape
().
as_list
()),
[
None
])
...
@@ -197,7 +197,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -197,7 +197,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
tensor_dict
[
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_is_crowd
].
get_shape
().
as_list
()),
fields
.
InputDataFields
.
groundtruth_is_crowd
].
get_shape
().
as_list
()),
...
@@ -220,7 +220,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -220,7 +220,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
})).
SerializeToString
()
})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
tensor_dict
[
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_difficult
].
get_shape
().
as_list
()),
fields
.
InputDataFields
.
groundtruth_difficult
].
get_shape
().
as_list
()),
...
@@ -263,7 +263,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
...
@@ -263,7 +263,7 @@ class TfExampleDecoderTest(tf.test.TestCase):
'image/segmentation/object/class'
:
self
.
_Int64Feature
(
'image/segmentation/object/class'
:
self
.
_Int64Feature
(
instance_segmentation_classes
)})).
SerializeToString
()
instance_segmentation_classes
)})).
SerializeToString
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
example_decoder
=
tf_example_decoder
.
TfExampleDecoder
()
tensor_dict
=
example_decoder
.
D
ecode
(
tf
.
convert_to_tensor
(
example
))
tensor_dict
=
example_decoder
.
d
ecode
(
tf
.
convert_to_tensor
(
example
))
self
.
assertAllEqual
((
self
.
assertAllEqual
((
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_instance_masks
].
tensor_dict
[
fields
.
InputDataFields
.
groundtruth_instance_masks
].
...
...
object_detection/evaluator.py
View file @
f906646c
...
@@ -154,7 +154,7 @@ def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
...
@@ -154,7 +154,7 @@ def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
"""
"""
if
batch_index
>=
eval_config
.
num_visualizations
:
if
batch_index
>=
eval_config
.
num_visualizations
:
if
'original_image'
in
tensor_dict
:
if
'original_image'
in
tensor_dict
:
tensor_dict
=
{
k
:
v
for
(
k
,
v
)
in
tensor_dict
.
iter
items
()
tensor_dict
=
{
k
:
v
for
(
k
,
v
)
in
tensor_dict
.
items
()
if
k
!=
'original_image'
}
if
k
!=
'original_image'
}
try
:
try
:
(
result_dict
,
_
)
=
sess
.
run
([
tensor_dict
,
update_op
])
(
result_dict
,
_
)
=
sess
.
run
([
tensor_dict
,
update_op
])
...
...
object_detection/export_inference_graph.py
View file @
f906646c
...
@@ -16,23 +16,30 @@
...
@@ -16,23 +16,30 @@
r
"""Tool to export an object detection model for inference.
r
"""Tool to export an object detection model for inference.
Prepares an object detection tensorflow graph for inference using model
Prepares an object detection tensorflow graph for inference using model
configuration and an optional trained checkpoint.
configuration and an optional trained checkpoint. Outputs either an inference
graph or a SavedModel (https://tensorflow.github.io/serving/serving_basic.html).
The inference graph contains one of t
wo
input nodes depending on the user
The inference graph contains one of t
hree
input nodes depending on the user
specified option.
specified option.
* `image_tensor`: Accepts a uint8 4-D tensor of shape [1, None, None, 3]
* `image_tensor`: Accepts a uint8 4-D tensor of shape [1, None, None, 3]
* `encoded_image_string_tensor`: Accepts a scalar string tensor of encoded PNG
or JPEG image.
* `tf_example`: Accepts a serialized TFExample proto. The batch size in this
* `tf_example`: Accepts a serialized TFExample proto. The batch size in this
case is always 1.
case is always 1.
and the following output nodes:
and the following output nodes
returned by the model.postprocess(..)
:
* `num_detections`
: Outputs float32 tensors of the form [batch]
* `num_detections`: Outputs float32 tensors of the form [batch]
that specifies the number of valid boxes per image in the batch.
that specifies the number of valid boxes per image in the batch.
* `detection_boxes`
: Outputs float32 tensors of the form
* `detection_boxes`: Outputs float32 tensors of the form
[batch, num_boxes, 4] containing detected boxes.
[batch, num_boxes, 4] containing detected boxes.
* `detection_scores`
: Outputs float32 tensors of the form
* `detection_scores`: Outputs float32 tensors of the form
[batch, num_boxes] containing class scores for the detections.
[batch, num_boxes] containing class scores for the detections.
* `detection_classes`: Outputs float32 tensors of the form
* `detection_classes`: Outputs float32 tensors of the form
[batch, num_boxes] containing classes for the detections.
[batch, num_boxes] containing classes for the detections.
* `detection_masks`: Outputs float32 tensors of the form
[batch, num_boxes, mask_height, mask_width] containing predicted instance
masks for each box if its present in the dictionary of postprocessed
tensors returned by the model.
Note that currently `batch` is always 1, but we will support `batch` > 1 in
Note that currently `batch` is always 1, but we will support `batch` > 1 in
the future.
the future.
...
@@ -61,7 +68,8 @@ slim = tf.contrib.slim
...
@@ -61,7 +68,8 @@ slim = tf.contrib.slim
flags
=
tf
.
app
.
flags
flags
=
tf
.
app
.
flags
flags
.
DEFINE_string
(
'input_type'
,
'image_tensor'
,
'Type of input node. Can be '
flags
.
DEFINE_string
(
'input_type'
,
'image_tensor'
,
'Type of input node. Can be '
'one of [`image_tensor` `tf_example_proto`]'
)
'one of [`image_tensor`, `encoded_image_string_tensor`, '
'`tf_example`]'
)
flags
.
DEFINE_string
(
'pipeline_config_path'
,
''
,
flags
.
DEFINE_string
(
'pipeline_config_path'
,
''
,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.'
)
'file.'
)
...
@@ -70,6 +78,8 @@ flags.DEFINE_string('checkpoint_path', '', 'Optional path to checkpoint file. '
...
@@ -70,6 +78,8 @@ flags.DEFINE_string('checkpoint_path', '', 'Optional path to checkpoint file. '
'the graph.'
)
'the graph.'
)
flags
.
DEFINE_string
(
'inference_graph_path'
,
''
,
'Path to write the output '
flags
.
DEFINE_string
(
'inference_graph_path'
,
''
,
'Path to write the output '
'inference graph.'
)
'inference graph.'
)
flags
.
DEFINE_bool
(
'export_as_saved_model'
,
False
,
'Whether the exported graph '
'should be saved as a SavedModel'
)
FLAGS
=
flags
.
FLAGS
FLAGS
=
flags
.
FLAGS
...
@@ -83,7 +93,8 @@ def main(_):
...
@@ -83,7 +93,8 @@ def main(_):
text_format
.
Merge
(
f
.
read
(),
pipeline_config
)
text_format
.
Merge
(
f
.
read
(),
pipeline_config
)
exporter
.
export_inference_graph
(
FLAGS
.
input_type
,
pipeline_config
,
exporter
.
export_inference_graph
(
FLAGS
.
input_type
,
pipeline_config
,
FLAGS
.
checkpoint_path
,
FLAGS
.
checkpoint_path
,
FLAGS
.
inference_graph_path
)
FLAGS
.
inference_graph_path
,
FLAGS
.
export_as_saved_model
)
if
__name__
==
'__main__'
:
if
__name__
==
'__main__'
:
...
...
object_detection/exporter.py
View file @
f906646c
...
@@ -22,6 +22,7 @@ from tensorflow.python.client import session
...
@@ -22,6 +22,7 @@ from tensorflow.python.client import session
from
tensorflow.python.framework
import
graph_util
from
tensorflow.python.framework
import
graph_util
from
tensorflow.python.framework
import
importer
from
tensorflow.python.framework
import
importer
from
tensorflow.python.platform
import
gfile
from
tensorflow.python.platform
import
gfile
from
tensorflow.python.saved_model
import
signature_constants
from
tensorflow.python.training
import
saver
as
saver_lib
from
tensorflow.python.training
import
saver
as
saver_lib
from
object_detection.builders
import
model_builder
from
object_detection.builders
import
model_builder
from
object_detection.core
import
standard_fields
as
fields
from
object_detection.core
import
standard_fields
as
fields
...
@@ -30,8 +31,8 @@ from object_detection.data_decoders import tf_example_decoder
...
@@ -30,8 +31,8 @@ from object_detection.data_decoders import tf_example_decoder
slim
=
tf
.
contrib
.
slim
slim
=
tf
.
contrib
.
slim
# TODO: Replace with freeze_graph.freeze_graph_with_def_protos when
newer
# TODO: Replace with freeze_graph.freeze_graph_with_def_protos when
# version of Tensorflow becomes more common.
#
newer
version of Tensorflow becomes more common.
def
freeze_graph_with_def_protos
(
def
freeze_graph_with_def_protos
(
input_graph_def
,
input_graph_def
,
input_saver_def
,
input_saver_def
,
...
@@ -39,7 +40,6 @@ def freeze_graph_with_def_protos(
...
@@ -39,7 +40,6 @@ def freeze_graph_with_def_protos(
output_node_names
,
output_node_names
,
restore_op_name
,
restore_op_name
,
filename_tensor_name
,
filename_tensor_name
,
output_graph
,
clear_devices
,
clear_devices
,
initializer_nodes
,
initializer_nodes
,
variable_names_blacklist
=
''
):
variable_names_blacklist
=
''
):
...
@@ -48,12 +48,12 @@ def freeze_graph_with_def_protos(
...
@@ -48,12 +48,12 @@ def freeze_graph_with_def_protos(
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
if
not
saver_lib
.
checkpoint_exists
(
input_checkpoint
):
if
not
saver_lib
.
checkpoint_exists
(
input_checkpoint
):
logging
.
info
(
'Input checkpoint "'
+
input_checkpoint
+
'" does not exist!'
)
raise
ValueError
(
return
-
1
'Input checkpoint "'
+
input_checkpoint
+
'" does not exist!'
)
if
not
output_node_names
:
if
not
output_node_names
:
logging
.
info
(
'You must supply the name of a node to --output_node_names.'
)
raise
ValueError
(
return
-
1
'You must supply the name of a node to --output_node_names.'
)
# Remove all the explicit device specifications for this node. This helps to
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
# make the graph more portable.
...
@@ -92,16 +92,37 @@ def freeze_graph_with_def_protos(
...
@@ -92,16 +92,37 @@ def freeze_graph_with_def_protos(
output_node_names
.
split
(
','
),
output_node_names
.
split
(
','
),
variable_names_blacklist
=
variable_names_blacklist
)
variable_names_blacklist
=
variable_names_blacklist
)
with
gfile
.
GFile
(
output_graph
,
'wb'
)
as
f
:
return
output_graph_def
f
.
write
(
output_graph_def
.
SerializeToString
())
logging
.
info
(
'%d ops in the final graph.'
,
len
(
output_graph_def
.
node
))
def
get_frozen_graph_def
(
inference_graph_def
,
use_moving_averages
,
input_checkpoint
,
output_node_names
):
"""Freezes all variables in a graph definition."""
saver
=
None
if
use_moving_averages
:
variable_averages
=
tf
.
train
.
ExponentialMovingAverage
(
0.0
)
variables_to_restore
=
variable_averages
.
variables_to_restore
()
saver
=
tf
.
train
.
Saver
(
variables_to_restore
)
else
:
saver
=
tf
.
train
.
Saver
()
frozen_graph_def
=
freeze_graph_with_def_protos
(
input_graph_def
=
inference_graph_def
,
input_saver_def
=
saver
.
as_saver_def
(),
input_checkpoint
=
input_checkpoint
,
output_node_names
=
output_node_names
,
restore_op_name
=
'save/restore_all'
,
filename_tensor_name
=
'save/Const:0'
,
clear_devices
=
True
,
initializer_nodes
=
''
)
return
frozen_graph_def
# TODO: Support batch tf example inputs.
# TODO: Support batch tf example inputs.
def
_tf_example_input_placeholder
():
def
_tf_example_input_placeholder
():
tf_example_placeholder
=
tf
.
placeholder
(
tf_example_placeholder
=
tf
.
placeholder
(
tf
.
string
,
shape
=
[],
name
=
'tf_example'
)
tf
.
string
,
shape
=
[],
name
=
'tf_example'
)
tensor_dict
=
tf_example_decoder
.
TfExampleDecoder
().
D
ecode
(
tensor_dict
=
tf_example_decoder
.
TfExampleDecoder
().
d
ecode
(
tf_example_placeholder
)
tf_example_placeholder
)
image
=
tensor_dict
[
fields
.
InputDataFields
.
image
]
image
=
tensor_dict
[
fields
.
InputDataFields
.
image
]
return
tf
.
expand_dims
(
image
,
axis
=
0
)
return
tf
.
expand_dims
(
image
,
axis
=
0
)
...
@@ -112,9 +133,21 @@ def _image_tensor_input_placeholder():
...
@@ -112,9 +133,21 @@ def _image_tensor_input_placeholder():
shape
=
(
1
,
None
,
None
,
3
),
shape
=
(
1
,
None
,
None
,
3
),
name
=
'image_tensor'
)
name
=
'image_tensor'
)
def
_encoded_image_string_tensor_input_placeholder
():
image_str
=
tf
.
placeholder
(
dtype
=
tf
.
string
,
shape
=
[],
name
=
'encoded_image_string_tensor'
)
image_tensor
=
tf
.
image
.
decode_image
(
image_str
,
channels
=
3
)
image_tensor
.
set_shape
((
None
,
None
,
3
))
return
tf
.
expand_dims
(
image_tensor
,
axis
=
0
)
input_placeholder_fn_map
=
{
input_placeholder_fn_map
=
{
'image_tensor'
:
_image_tensor_input_placeholder
,
'encoded_image_string_tensor'
:
_encoded_image_string_tensor_input_placeholder
,
'tf_example'
:
_tf_example_input_placeholder
,
'tf_example'
:
_tf_example_input_placeholder
,
'image_tensor'
:
_image_tensor_input_placeholder
}
}
...
@@ -129,23 +162,36 @@ def _add_output_tensor_nodes(postprocessed_tensors):
...
@@ -129,23 +162,36 @@ def _add_output_tensor_nodes(postprocessed_tensors):
containing scores for the detected boxes.
containing scores for the detected boxes.
* detection_classes: float32 tensor of shape [batch_size, num_boxes]
* detection_classes: float32 tensor of shape [batch_size, num_boxes]
containing class predictions for the detected boxes.
containing class predictions for the detected boxes.
* detection_masks: (Optional) float32 tensor of shape
[batch_size, num_boxes, mask_height, mask_width] containing masks for each
detection box.
Args:
Args:
postprocessed_tensors: a dictionary containing the following fields
postprocessed_tensors: a dictionary containing the following fields
'detection_boxes': [batch, max_detections, 4]
'detection_boxes': [batch, max_detections, 4]
'detection_scores': [batch, max_detections]
'detection_scores': [batch, max_detections]
'detection_classes': [batch, max_detections]
'detection_classes': [batch, max_detections]
'detection_masks': [batch, max_detections, mask_height, mask_width]
(optional).
'num_detections': [batch]
'num_detections': [batch]
Returns:
A tensor dict containing the added output tensor nodes.
"""
"""
label_id_offset
=
1
label_id_offset
=
1
boxes
=
postprocessed_tensors
.
get
(
'detection_boxes'
)
boxes
=
postprocessed_tensors
.
get
(
'detection_boxes'
)
scores
=
postprocessed_tensors
.
get
(
'detection_scores'
)
scores
=
postprocessed_tensors
.
get
(
'detection_scores'
)
classes
=
postprocessed_tensors
.
get
(
'detection_classes'
)
+
label_id_offset
classes
=
postprocessed_tensors
.
get
(
'detection_classes'
)
+
label_id_offset
masks
=
postprocessed_tensors
.
get
(
'detection_masks'
)
num_detections
=
postprocessed_tensors
.
get
(
'num_detections'
)
num_detections
=
postprocessed_tensors
.
get
(
'num_detections'
)
tf
.
identity
(
boxes
,
name
=
'detection_boxes'
)
outputs
=
{}
tf
.
identity
(
scores
,
name
=
'detection_scores'
)
outputs
[
'detection_boxes'
]
=
tf
.
identity
(
boxes
,
name
=
'detection_boxes'
)
tf
.
identity
(
classes
,
name
=
'detection_classes'
)
outputs
[
'detection_scores'
]
=
tf
.
identity
(
scores
,
name
=
'detection_scores'
)
tf
.
identity
(
num_detections
,
name
=
'num_detections'
)
outputs
[
'detection_classes'
]
=
tf
.
identity
(
classes
,
name
=
'detection_classes'
)
outputs
[
'num_detections'
]
=
tf
.
identity
(
num_detections
,
name
=
'num_detections'
)
if
masks
is
not
None
:
outputs
[
'detection_masks'
]
=
tf
.
identity
(
masks
,
name
=
'detection_masks'
)
return
outputs
def
_write_inference_graph
(
inference_graph_path
,
def
_write_inference_graph
(
inference_graph_path
,
...
@@ -172,23 +218,17 @@ def _write_inference_graph(inference_graph_path,
...
@@ -172,23 +218,17 @@ def _write_inference_graph(inference_graph_path,
"""
"""
inference_graph_def
=
tf
.
get_default_graph
().
as_graph_def
()
inference_graph_def
=
tf
.
get_default_graph
().
as_graph_def
()
if
checkpoint_path
:
if
checkpoint_path
:
saver
=
None
output_graph_def
=
get_frozen_graph_def
(
if
use_moving_averages
:
inference_graph_def
=
inference_graph_def
,
variable_averages
=
tf
.
train
.
ExponentialMovingAverage
(
0.0
)
use_moving_averages
=
use_moving_averages
,
variables_to_restore
=
variable_averages
.
variables_to_restore
()
saver
=
tf
.
train
.
Saver
(
variables_to_restore
)
else
:
saver
=
tf
.
train
.
Saver
()
freeze_graph_with_def_protos
(
input_graph_def
=
inference_graph_def
,
input_saver_def
=
saver
.
as_saver_def
(),
input_checkpoint
=
checkpoint_path
,
input_checkpoint
=
checkpoint_path
,
output_node_names
=
output_node_names
,
output_node_names
=
output_node_names
,
restore_op_name
=
'save/restore_all'
,
)
filename_tensor_name
=
'save/Const:0'
,
output_graph
=
inference_graph_path
,
with
gfile
.
GFile
(
inference_graph_path
,
'wb'
)
as
f
:
clear_devices
=
True
,
f
.
write
(
output_graph_def
.
SerializeToString
())
initializer_nodes
=
''
)
logging
.
info
(
'%d ops in the final graph.'
,
len
(
output_graph_def
.
node
))
return
return
tf
.
train
.
write_graph
(
inference_graph_def
,
tf
.
train
.
write_graph
(
inference_graph_def
,
os
.
path
.
dirname
(
inference_graph_path
),
os
.
path
.
dirname
(
inference_graph_path
),
...
@@ -196,24 +236,90 @@ def _write_inference_graph(inference_graph_path,
...
@@ -196,24 +236,90 @@ def _write_inference_graph(inference_graph_path,
as_text
=
False
)
as_text
=
False
)
def
_write_saved_model
(
inference_graph_path
,
inputs
,
outputs
,
checkpoint_path
=
None
,
use_moving_averages
=
False
):
"""Writes SavedModel to disk.
If checkpoint_path is not None bakes the weights into the graph thereby
eliminating the need of checkpoint files during inference. If the model
was trained with moving averages, setting use_moving_averages to true
restores the moving averages, otherwise the original set of variables
is restored.
Args:
inference_graph_path: Path to write inference graph.
inputs: The input image tensor to use for detection.
outputs: A tensor dictionary containing the outputs of a DetectionModel.
checkpoint_path: Optional path to the checkpoint file.
use_moving_averages: Whether to export the original or the moving averages
of the trainable variables from the checkpoint.
"""
inference_graph_def
=
tf
.
get_default_graph
().
as_graph_def
()
checkpoint_graph_def
=
None
if
checkpoint_path
:
output_node_names
=
','
.
join
(
outputs
.
keys
())
checkpoint_graph_def
=
get_frozen_graph_def
(
inference_graph_def
=
inference_graph_def
,
use_moving_averages
=
use_moving_averages
,
input_checkpoint
=
checkpoint_path
,
output_node_names
=
output_node_names
)
with
tf
.
Graph
().
as_default
():
with
session
.
Session
()
as
sess
:
tf
.
import_graph_def
(
checkpoint_graph_def
)
builder
=
tf
.
saved_model
.
builder
.
SavedModelBuilder
(
inference_graph_path
)
tensor_info_inputs
=
{
'inputs'
:
tf
.
saved_model
.
utils
.
build_tensor_info
(
inputs
)}
tensor_info_outputs
=
{}
for
k
,
v
in
outputs
.
items
():
tensor_info_outputs
[
k
]
=
tf
.
saved_model
.
utils
.
build_tensor_info
(
v
)
detection_signature
=
(
tf
.
saved_model
.
signature_def_utils
.
build_signature_def
(
inputs
=
tensor_info_inputs
,
outputs
=
tensor_info_outputs
,
method_name
=
signature_constants
.
PREDICT_METHOD_NAME
))
builder
.
add_meta_graph_and_variables
(
sess
,
[
tf
.
saved_model
.
tag_constants
.
SERVING
],
signature_def_map
=
{
'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY'
:
detection_signature
,
},
)
builder
.
save
()
def
_export_inference_graph
(
input_type
,
def
_export_inference_graph
(
input_type
,
detection_model
,
detection_model
,
use_moving_averages
,
use_moving_averages
,
checkpoint_path
,
checkpoint_path
,
inference_graph_path
):
inference_graph_path
,
export_as_saved_model
=
False
):
"""Export helper."""
if
input_type
not
in
input_placeholder_fn_map
:
if
input_type
not
in
input_placeholder_fn_map
:
raise
ValueError
(
'Unknown input type: {}'
.
format
(
input_type
))
raise
ValueError
(
'Unknown input type: {}'
.
format
(
input_type
))
inputs
=
tf
.
to_float
(
input_placeholder_fn_map
[
input_type
]())
inputs
=
tf
.
to_float
(
input_placeholder_fn_map
[
input_type
]())
preprocessed_inputs
=
detection_model
.
preprocess
(
inputs
)
preprocessed_inputs
=
detection_model
.
preprocess
(
inputs
)
output_tensors
=
detection_model
.
predict
(
preprocessed_inputs
)
output_tensors
=
detection_model
.
predict
(
preprocessed_inputs
)
postprocessed_tensors
=
detection_model
.
postprocess
(
output_tensors
)
postprocessed_tensors
=
detection_model
.
postprocess
(
output_tensors
)
_add_output_tensor_nodes
(
postprocessed_tensors
)
outputs
=
_add_output_tensor_nodes
(
postprocessed_tensors
)
_write_inference_graph
(
inference_graph_path
,
checkpoint_path
,
out_node_names
=
list
(
outputs
.
keys
())
use_moving_averages
)
if
export_as_saved_model
:
_write_saved_model
(
inference_graph_path
,
inputs
,
outputs
,
checkpoint_path
,
use_moving_averages
)
else
:
_write_inference_graph
(
inference_graph_path
,
checkpoint_path
,
use_moving_averages
,
output_node_names
=
','
.
join
(
out_node_names
))
def
export_inference_graph
(
input_type
,
pipeline_config
,
checkpoint_path
,
def
export_inference_graph
(
input_type
,
pipeline_config
,
checkpoint_path
,
inference_graph_path
):
inference_graph_path
,
export_as_saved_model
=
False
):
"""Exports inference graph for the model specified in the pipeline config.
"""Exports inference graph for the model specified in the pipeline config.
Args:
Args:
...
@@ -222,9 +328,12 @@ def export_inference_graph(input_type, pipeline_config, checkpoint_path,
...
@@ -222,9 +328,12 @@ def export_inference_graph(input_type, pipeline_config, checkpoint_path,
pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
checkpoint_path: Path to the checkpoint file to freeze.
checkpoint_path: Path to the checkpoint file to freeze.
inference_graph_path: Path to write inference graph to.
inference_graph_path: Path to write inference graph to.
export_as_saved_model: If the model should be exported as a SavedModel. If
false, it is saved as an inference graph.
"""
"""
detection_model
=
model_builder
.
build
(
pipeline_config
.
model
,
detection_model
=
model_builder
.
build
(
pipeline_config
.
model
,
is_training
=
False
)
is_training
=
False
)
_export_inference_graph
(
input_type
,
detection_model
,
_export_inference_graph
(
input_type
,
detection_model
,
pipeline_config
.
eval_config
.
use_moving_averages
,
pipeline_config
.
eval_config
.
use_moving_averages
,
checkpoint_path
,
inference_graph_path
)
checkpoint_path
,
inference_graph_path
,
export_as_saved_model
)
object_detection/exporter_test.py
View file @
f906646c
...
@@ -15,35 +15,44 @@
...
@@ -15,35 +15,44 @@
"""Tests for object_detection.export_inference_graph."""
"""Tests for object_detection.export_inference_graph."""
import
os
import
os
import
mock
import
numpy
as
np
import
numpy
as
np
import
six
import
tensorflow
as
tf
import
tensorflow
as
tf
from
object_detection
import
exporter
from
object_detection
import
exporter
from
object_detection.builders
import
model_builder
from
object_detection.builders
import
model_builder
from
object_detection.core
import
model
from
object_detection.core
import
model
from
object_detection.protos
import
pipeline_pb2
from
object_detection.protos
import
pipeline_pb2
if
six
.
PY2
:
import
mock
# pylint: disable=g-import-not-at-top
else
:
from
unittest
import
mock
# pylint: disable=g-import-not-at-top
class
FakeModel
(
model
.
DetectionModel
):
class
FakeModel
(
model
.
DetectionModel
):
def
__init__
(
self
,
add_detection_masks
=
False
):
self
.
_add_detection_masks
=
add_detection_masks
def
preprocess
(
self
,
inputs
):
def
preprocess
(
self
,
inputs
):
return
(
tf
.
identity
(
inputs
)
*
return
tf
.
identity
(
inputs
)
tf
.
get_variable
(
'dummy'
,
shape
=
(),
initializer
=
tf
.
constant_initializer
(
2
),
dtype
=
tf
.
float32
))
def
predict
(
self
,
preprocessed_inputs
):
def
predict
(
self
,
preprocessed_inputs
):
return
{
'image'
:
tf
.
identity
(
preprocessed_inputs
)}
return
{
'image'
:
tf
.
layers
.
conv2d
(
preprocessed_inputs
,
3
,
1
)}
def
postprocess
(
self
,
prediction_dict
):
def
postprocess
(
self
,
prediction_dict
):
with
tf
.
control_dependencies
(
prediction_dict
.
values
()):
with
tf
.
control_dependencies
(
prediction_dict
.
values
()):
return
{
postprocessed_tensors
=
{
'detection_boxes'
:
tf
.
constant
([[
0.0
,
0.0
,
0.5
,
0.5
],
'detection_boxes'
:
tf
.
constant
([[
0.0
,
0.0
,
0.5
,
0.5
],
[
0.5
,
0.5
,
0.8
,
0.8
]],
tf
.
float32
),
[
0.5
,
0.5
,
0.8
,
0.8
]],
tf
.
float32
),
'detection_scores'
:
tf
.
constant
([[
0.7
,
0.6
]],
tf
.
float32
),
'detection_scores'
:
tf
.
constant
([[
0.7
,
0.6
]],
tf
.
float32
),
'detection_classes'
:
tf
.
constant
([[
0
,
1
]],
tf
.
float32
),
'detection_classes'
:
tf
.
constant
([[
0
,
1
]],
tf
.
float32
),
'num_detections'
:
tf
.
constant
([
2
],
tf
.
float32
)
'num_detections'
:
tf
.
constant
([
2
],
tf
.
float32
)
}
}
if
self
.
_add_detection_masks
:
postprocessed_tensors
[
'detection_masks'
]
=
tf
.
constant
(
np
.
arange
(
32
).
reshape
([
2
,
4
,
4
]),
tf
.
float32
)
return
postprocessed_tensors
def
restore_fn
(
self
,
checkpoint_path
,
from_detection_checkpoint
):
def
restore_fn
(
self
,
checkpoint_path
,
from_detection_checkpoint
):
pass
pass
...
@@ -58,8 +67,11 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -58,8 +67,11 @@ class ExportInferenceGraphTest(tf.test.TestCase):
use_moving_averages
):
use_moving_averages
):
g
=
tf
.
Graph
()
g
=
tf
.
Graph
()
with
g
.
as_default
():
with
g
.
as_default
():
mock_model
=
FakeModel
(
num_classes
=
1
)
mock_model
=
FakeModel
()
mock_model
.
preprocess
(
tf
.
constant
([
1
,
3
,
4
,
3
],
tf
.
float32
))
preprocessed_inputs
=
mock_model
.
preprocess
(
tf
.
ones
([
1
,
3
,
4
,
3
],
tf
.
float32
))
predictions
=
mock_model
.
predict
(
preprocessed_inputs
)
mock_model
.
postprocess
(
predictions
)
if
use_moving_averages
:
if
use_moving_averages
:
tf
.
train
.
ExponentialMovingAverage
(
0.0
).
apply
()
tf
.
train
.
ExponentialMovingAverage
(
0.0
).
apply
()
saver
=
tf
.
train
.
Saver
()
saver
=
tf
.
train
.
Saver
()
...
@@ -93,7 +105,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -93,7 +105,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
def
test_export_graph_with_image_tensor_input
(
self
):
def
test_export_graph_with_image_tensor_input
(
self
):
with
mock
.
patch
.
object
(
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
num_classes
=
1
)
mock_builder
.
return_value
=
FakeModel
()
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'exported_graph.pbtxt'
)
'exported_graph.pbtxt'
)
...
@@ -108,7 +120,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -108,7 +120,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
def
test_export_graph_with_tf_example_input
(
self
):
def
test_export_graph_with_tf_example_input
(
self
):
with
mock
.
patch
.
object
(
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
num_classes
=
1
)
mock_builder
.
return_value
=
FakeModel
()
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'exported_graph.pbtxt'
)
'exported_graph.pbtxt'
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
...
@@ -119,6 +131,20 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -119,6 +131,20 @@ class ExportInferenceGraphTest(tf.test.TestCase):
checkpoint_path
=
None
,
checkpoint_path
=
None
,
inference_graph_path
=
inference_graph_path
)
inference_graph_path
=
inference_graph_path
)
def
test_export_graph_with_encoded_image_string_input
(
self
):
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
()
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'exported_graph.pbtxt'
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
.
eval_config
.
use_moving_averages
=
False
exporter
.
export_inference_graph
(
input_type
=
'encoded_image_string_tensor'
,
pipeline_config
=
pipeline_config
,
checkpoint_path
=
None
,
inference_graph_path
=
inference_graph_path
)
def
test_export_frozen_graph
(
self
):
def
test_export_frozen_graph
(
self
):
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
...
@@ -127,7 +153,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -127,7 +153,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
'exported_graph.pb'
)
'exported_graph.pb'
)
with
mock
.
patch
.
object
(
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
num_classes
=
1
)
mock_builder
.
return_value
=
FakeModel
()
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
.
eval_config
.
use_moving_averages
=
False
pipeline_config
.
eval_config
.
use_moving_averages
=
False
exporter
.
export_inference_graph
(
exporter
.
export_inference_graph
(
...
@@ -144,7 +170,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -144,7 +170,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
'exported_graph.pb'
)
'exported_graph.pb'
)
with
mock
.
patch
.
object
(
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
num_classes
=
1
)
mock_builder
.
return_value
=
FakeModel
()
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
.
eval_config
.
use_moving_averages
=
True
pipeline_config
.
eval_config
.
use_moving_averages
=
True
exporter
.
export_inference_graph
(
exporter
.
export_inference_graph
(
...
@@ -153,6 +179,55 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -153,6 +179,55 @@ class ExportInferenceGraphTest(tf.test.TestCase):
checkpoint_path
=
checkpoint_path
,
checkpoint_path
=
checkpoint_path
,
inference_graph_path
=
inference_graph_path
)
inference_graph_path
=
inference_graph_path
)
def
test_export_model_with_all_output_nodes
(
self
):
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
use_moving_averages
=
False
)
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'exported_graph.pb'
)
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
add_detection_masks
=
True
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
exporter
.
export_inference_graph
(
input_type
=
'image_tensor'
,
pipeline_config
=
pipeline_config
,
checkpoint_path
=
checkpoint_path
,
inference_graph_path
=
inference_graph_path
)
inference_graph
=
self
.
_load_inference_graph
(
inference_graph_path
)
with
self
.
test_session
(
graph
=
inference_graph
):
inference_graph
.
get_tensor_by_name
(
'image_tensor:0'
)
inference_graph
.
get_tensor_by_name
(
'detection_boxes:0'
)
inference_graph
.
get_tensor_by_name
(
'detection_scores:0'
)
inference_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
inference_graph
.
get_tensor_by_name
(
'detection_masks:0'
)
inference_graph
.
get_tensor_by_name
(
'num_detections:0'
)
def
test_export_model_with_detection_only_nodes
(
self
):
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
use_moving_averages
=
False
)
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'exported_graph.pb'
)
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
add_detection_masks
=
False
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
exporter
.
export_inference_graph
(
input_type
=
'image_tensor'
,
pipeline_config
=
pipeline_config
,
checkpoint_path
=
checkpoint_path
,
inference_graph_path
=
inference_graph_path
)
inference_graph
=
self
.
_load_inference_graph
(
inference_graph_path
)
with
self
.
test_session
(
graph
=
inference_graph
):
inference_graph
.
get_tensor_by_name
(
'image_tensor:0'
)
inference_graph
.
get_tensor_by_name
(
'detection_boxes:0'
)
inference_graph
.
get_tensor_by_name
(
'detection_scores:0'
)
inference_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
inference_graph
.
get_tensor_by_name
(
'num_detections:0'
)
with
self
.
assertRaises
(
KeyError
):
inference_graph
.
get_tensor_by_name
(
'detection_masks:0'
)
def
test_export_and_run_inference_with_image_tensor
(
self
):
def
test_export_and_run_inference_with_image_tensor
(
self
):
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
...
@@ -161,7 +236,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -161,7 +236,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
'exported_graph.pb'
)
'exported_graph.pb'
)
with
mock
.
patch
.
object
(
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
num_classes
=
1
)
mock_builder
.
return_value
=
FakeModel
(
add_detection_masks
=
True
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
.
eval_config
.
use_moving_averages
=
False
pipeline_config
.
eval_config
.
use_moving_averages
=
False
exporter
.
export_inference_graph
(
exporter
.
export_inference_graph
(
...
@@ -176,16 +251,72 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -176,16 +251,72 @@ class ExportInferenceGraphTest(tf.test.TestCase):
boxes
=
inference_graph
.
get_tensor_by_name
(
'detection_boxes:0'
)
boxes
=
inference_graph
.
get_tensor_by_name
(
'detection_boxes:0'
)
scores
=
inference_graph
.
get_tensor_by_name
(
'detection_scores:0'
)
scores
=
inference_graph
.
get_tensor_by_name
(
'detection_scores:0'
)
classes
=
inference_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
classes
=
inference_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
masks
=
inference_graph
.
get_tensor_by_name
(
'detection_masks:0'
)
num_detections
=
inference_graph
.
get_tensor_by_name
(
'num_detections:0'
)
num_detections
=
inference_graph
.
get_tensor_by_name
(
'num_detections:0'
)
(
boxes
,
scores
,
classes
,
num_detections
)
=
sess
.
run
(
(
boxes
,
scores
,
classes
,
masks
,
num_detections
)
=
sess
.
run
(
[
boxes
,
scores
,
classes
,
num_detections
],
[
boxes
,
scores
,
classes
,
masks
,
num_detections
],
feed_dict
=
{
image_tensor
:
np
.
ones
((
1
,
4
,
4
,
3
)).
astype
(
np
.
uint8
)})
feed_dict
=
{
image_tensor
:
np
.
ones
((
1
,
4
,
4
,
3
)).
astype
(
np
.
uint8
)})
self
.
assertAllClose
(
boxes
,
[[
0.0
,
0.0
,
0.5
,
0.5
],
self
.
assertAllClose
(
boxes
,
[[
0.0
,
0.0
,
0.5
,
0.5
],
[
0.5
,
0.5
,
0.8
,
0.8
]])
[
0.5
,
0.5
,
0.8
,
0.8
]])
self
.
assertAllClose
(
scores
,
[[
0.7
,
0.6
]])
self
.
assertAllClose
(
scores
,
[[
0.7
,
0.6
]])
self
.
assertAllClose
(
classes
,
[[
1
,
2
]])
self
.
assertAllClose
(
classes
,
[[
1
,
2
]])
self
.
assertAllClose
(
masks
,
np
.
arange
(
32
).
reshape
([
2
,
4
,
4
]))
self
.
assertAllClose
(
num_detections
,
[
2
])
self
.
assertAllClose
(
num_detections
,
[
2
])
def
_create_encoded_image_string
(
self
,
image_array_np
,
encoding_format
):
od_graph
=
tf
.
Graph
()
with
od_graph
.
as_default
():
if
encoding_format
==
'jpg'
:
encoded_string
=
tf
.
image
.
encode_jpeg
(
image_array_np
)
elif
encoding_format
==
'png'
:
encoded_string
=
tf
.
image
.
encode_png
(
image_array_np
)
else
:
raise
ValueError
(
'Supports only the following formats: `jpg`, `png`'
)
with
self
.
test_session
(
graph
=
od_graph
):
return
encoded_string
.
eval
()
def
test_export_and_run_inference_with_encoded_image_string_tensor
(
self
):
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
use_moving_averages
=
False
)
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'exported_graph.pb'
)
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
add_detection_masks
=
True
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
.
eval_config
.
use_moving_averages
=
False
exporter
.
export_inference_graph
(
input_type
=
'encoded_image_string_tensor'
,
pipeline_config
=
pipeline_config
,
checkpoint_path
=
checkpoint_path
,
inference_graph_path
=
inference_graph_path
)
inference_graph
=
self
.
_load_inference_graph
(
inference_graph_path
)
jpg_image_str
=
self
.
_create_encoded_image_string
(
np
.
ones
((
4
,
4
,
3
)).
astype
(
np
.
uint8
),
'jpg'
)
png_image_str
=
self
.
_create_encoded_image_string
(
np
.
ones
((
4
,
4
,
3
)).
astype
(
np
.
uint8
),
'png'
)
with
self
.
test_session
(
graph
=
inference_graph
)
as
sess
:
image_str_tensor
=
inference_graph
.
get_tensor_by_name
(
'encoded_image_string_tensor:0'
)
boxes
=
inference_graph
.
get_tensor_by_name
(
'detection_boxes:0'
)
scores
=
inference_graph
.
get_tensor_by_name
(
'detection_scores:0'
)
classes
=
inference_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
masks
=
inference_graph
.
get_tensor_by_name
(
'detection_masks:0'
)
num_detections
=
inference_graph
.
get_tensor_by_name
(
'num_detections:0'
)
for
image_str
in
[
jpg_image_str
,
png_image_str
]:
(
boxes_np
,
scores_np
,
classes_np
,
masks_np
,
num_detections_np
)
=
sess
.
run
(
[
boxes
,
scores
,
classes
,
masks
,
num_detections
],
feed_dict
=
{
image_str_tensor
:
image_str
})
self
.
assertAllClose
(
boxes_np
,
[[
0.0
,
0.0
,
0.5
,
0.5
],
[
0.5
,
0.5
,
0.8
,
0.8
]])
self
.
assertAllClose
(
scores_np
,
[[
0.7
,
0.6
]])
self
.
assertAllClose
(
classes_np
,
[[
1
,
2
]])
self
.
assertAllClose
(
masks_np
,
np
.
arange
(
32
).
reshape
([
2
,
4
,
4
]))
self
.
assertAllClose
(
num_detections_np
,
[
2
])
def
test_export_and_run_inference_with_tf_example
(
self
):
def
test_export_and_run_inference_with_tf_example
(
self
):
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
...
@@ -194,7 +325,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -194,7 +325,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
'exported_graph.pb'
)
'exported_graph.pb'
)
with
mock
.
patch
.
object
(
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
num_classes
=
1
)
mock_builder
.
return_value
=
FakeModel
(
add_detection_masks
=
True
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
.
eval_config
.
use_moving_averages
=
False
pipeline_config
.
eval_config
.
use_moving_averages
=
False
exporter
.
export_inference_graph
(
exporter
.
export_inference_graph
(
...
@@ -209,17 +340,58 @@ class ExportInferenceGraphTest(tf.test.TestCase):
...
@@ -209,17 +340,58 @@ class ExportInferenceGraphTest(tf.test.TestCase):
boxes
=
inference_graph
.
get_tensor_by_name
(
'detection_boxes:0'
)
boxes
=
inference_graph
.
get_tensor_by_name
(
'detection_boxes:0'
)
scores
=
inference_graph
.
get_tensor_by_name
(
'detection_scores:0'
)
scores
=
inference_graph
.
get_tensor_by_name
(
'detection_scores:0'
)
classes
=
inference_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
classes
=
inference_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
masks
=
inference_graph
.
get_tensor_by_name
(
'detection_masks:0'
)
num_detections
=
inference_graph
.
get_tensor_by_name
(
'num_detections:0'
)
num_detections
=
inference_graph
.
get_tensor_by_name
(
'num_detections:0'
)
(
boxes
,
scores
,
classes
,
num_detections
)
=
sess
.
run
(
(
boxes
,
scores
,
classes
,
masks
,
num_detections
)
=
sess
.
run
(
[
boxes
,
scores
,
classes
,
num_detections
],
[
boxes
,
scores
,
classes
,
masks
,
num_detections
],
feed_dict
=
{
tf_example
:
self
.
_create_tf_example
(
feed_dict
=
{
tf_example
:
self
.
_create_tf_example
(
np
.
ones
((
4
,
4
,
3
)).
astype
(
np
.
uint8
))})
np
.
ones
((
4
,
4
,
3
)).
astype
(
np
.
uint8
))})
self
.
assertAllClose
(
boxes
,
[[
0.0
,
0.0
,
0.5
,
0.5
],
self
.
assertAllClose
(
boxes
,
[[
0.0
,
0.0
,
0.5
,
0.5
],
[
0.5
,
0.5
,
0.8
,
0.8
]])
[
0.5
,
0.5
,
0.8
,
0.8
]])
self
.
assertAllClose
(
scores
,
[[
0.7
,
0.6
]])
self
.
assertAllClose
(
scores
,
[[
0.7
,
0.6
]])
self
.
assertAllClose
(
classes
,
[[
1
,
2
]])
self
.
assertAllClose
(
classes
,
[[
1
,
2
]])
self
.
assertAllClose
(
masks
,
np
.
arange
(
32
).
reshape
([
2
,
4
,
4
]))
self
.
assertAllClose
(
num_detections
,
[
2
])
self
.
assertAllClose
(
num_detections
,
[
2
])
def
test_export_saved_model_and_run_inference
(
self
):
checkpoint_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'model-ckpt'
)
self
.
_save_checkpoint_from_mock_model
(
checkpoint_path
,
use_moving_averages
=
False
)
inference_graph_path
=
os
.
path
.
join
(
self
.
get_temp_dir
(),
'saved_model'
)
with
mock
.
patch
.
object
(
model_builder
,
'build'
,
autospec
=
True
)
as
mock_builder
:
mock_builder
.
return_value
=
FakeModel
(
add_detection_masks
=
True
)
pipeline_config
=
pipeline_pb2
.
TrainEvalPipelineConfig
()
pipeline_config
.
eval_config
.
use_moving_averages
=
False
exporter
.
export_inference_graph
(
input_type
=
'tf_example'
,
pipeline_config
=
pipeline_config
,
checkpoint_path
=
checkpoint_path
,
inference_graph_path
=
inference_graph_path
,
export_as_saved_model
=
True
)
with
tf
.
Graph
().
as_default
()
as
od_graph
:
with
self
.
test_session
(
graph
=
od_graph
)
as
sess
:
tf
.
saved_model
.
loader
.
load
(
sess
,
[
tf
.
saved_model
.
tag_constants
.
SERVING
],
inference_graph_path
)
tf_example
=
od_graph
.
get_tensor_by_name
(
'import/tf_example:0'
)
boxes
=
od_graph
.
get_tensor_by_name
(
'import/detection_boxes:0'
)
scores
=
od_graph
.
get_tensor_by_name
(
'import/detection_scores:0'
)
classes
=
od_graph
.
get_tensor_by_name
(
'import/detection_classes:0'
)
masks
=
od_graph
.
get_tensor_by_name
(
'import/detection_masks:0'
)
num_detections
=
od_graph
.
get_tensor_by_name
(
'import/num_detections:0'
)
(
boxes
,
scores
,
classes
,
masks
,
num_detections
)
=
sess
.
run
(
[
boxes
,
scores
,
classes
,
masks
,
num_detections
],
feed_dict
=
{
tf_example
:
self
.
_create_tf_example
(
np
.
ones
((
4
,
4
,
3
)).
astype
(
np
.
uint8
))})
self
.
assertAllClose
(
boxes
,
[[
0.0
,
0.0
,
0.5
,
0.5
],
[
0.5
,
0.5
,
0.8
,
0.8
]])
self
.
assertAllClose
(
scores
,
[[
0.7
,
0.6
]])
self
.
assertAllClose
(
classes
,
[[
1
,
2
]])
self
.
assertAllClose
(
masks
,
np
.
arange
(
32
).
reshape
([
2
,
4
,
4
]))
self
.
assertAllClose
(
num_detections
,
[
2
])
if
__name__
==
'__main__'
:
if
__name__
==
'__main__'
:
tf
.
test
.
main
()
tf
.
test
.
main
()
object_detection/g3doc/preparing_inputs.md
View file @
f906646c
...
@@ -2,7 +2,7 @@
...
@@ -2,7 +2,7 @@
Tensorflow Object Detection API reads data using the TFRecord file format. Two
Tensorflow Object Detection API reads data using the TFRecord file format. Two
sample scripts (
`create_pascal_tf_record.py`
and
`create_pet_tf_record.py`
) are
sample scripts (
`create_pascal_tf_record.py`
and
`create_pet_tf_record.py`
) are
provided to convert from the PASCAL VOC dataset and Oxford-IIT Pet dataset to
provided to convert from the PASCAL VOC dataset and Oxford-II
I
T Pet dataset to
TFRecords.
TFRecords.
## Generating the PASCAL VOC TFRecord files.
## Generating the PASCAL VOC TFRecord files.
...
@@ -11,35 +11,35 @@ The raw 2012 PASCAL VOC data set can be downloaded
...
@@ -11,35 +11,35 @@ The raw 2012 PASCAL VOC data set can be downloaded
[
here
](
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
)
.
[
here
](
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
)
.
Extract the tar file and run the
`create_pascal_tf_record`
script:
Extract the tar file and run the
`create_pascal_tf_record`
script:
```
```
bash
# From tensorflow/models/object_detection
# From tensorflow/models/object_detection
tar
-xvf
VOCtrainval_11-May-2012.tar
tar
-xvf
VOCtrainval_11-May-2012.tar
./
create_pascal_tf_record --data_dir=VOCdevkit \
python
create_pascal_tf_record
.py
--data_dir
=
VOCdevkit
\
--year
=
VOC2012
--set
=
train
--output_path
=
pascal_train.record
--year
=
VOC2012
--set
=
train
--output_path
=
pascal_train.record
./
create_pascal_tf_record --data_dir=
/home/user/
VOCdevkit \
python
create_pascal_tf_record
.py
--data_dir
=
VOCdevkit
\
--year
=
VOC2012
--set
=
val
--output_path
=
pascal_val.record
--year
=
VOC2012
--set
=
val
--output_path
=
pascal_val.record
```
```
You should end up with two TFRecord files named pascal_train.record and
You should end up with two TFRecord files named
`
pascal_train.record
`
and
pascal_val.record in the tensorflow/models/object_detection directory.
`
pascal_val.record
`
in the
`
tensorflow/models/object_detection
`
directory.
The label map for the PASCAL VOC data set can be found at
The label map for the PASCAL VOC data set can be found at
data/pascal_label_map.pbtxt.
`
data/pascal_label_map.pbtxt
`
.
## Generating the Oxford-IIIT Pet TFRecord files.
## Generation the Oxford-II
I
T Pet TFRecord files.
The Oxford-IIT Pet data set can be downloaded from
The Oxford-II
I
T Pet data set can be downloaded from
[
their website
](
http://www.robots.ox.ac.uk/~vgg/data/pets/
)
. Extract the tar
[
their website
](
http://www.robots.ox.ac.uk/~vgg/data/pets/
)
. Extract the tar
file and run the
`create_pet_tf_record`
script to generate TFRecords.
file and run the
`create_pet_tf_record`
script to generate TFRecords.
```
```
bash
# From tensorflow/models/object_detection
# From tensorflow/models/object_detection
tar
-xvf
annotations.tar.gz
tar
-xvf
annotations.tar.gz
tar
-xvf
images.tar.gz
tar
-xvf
images.tar.gz
./
create_pet_tf_record --data_dir=`pwd` --output_dir=`pwd`
python
create_pet_tf_record
.py
--data_dir
=
`
pwd
`
--output_dir
=
`
pwd
`
```
```
You should end up with two TFRecord files named pet_train.record and
You should end up with two TFRecord files named
`
pet_train.record
`
and
pet_val.record in the tensorflow/models/object_detection directory.
`
pet_val.record
`
in the
`
tensorflow/models/object_detection
`
directory.
The label map for the Pet dataset can be found at data/pet_label_map.pbtxt.
The label map for the Pet dataset can be found at
`
data/pet_label_map.pbtxt
`
.
object_detection/g3doc/running_locally.md
View file @
f906646c
...
@@ -10,7 +10,7 @@ dependencies, compiling the configuration protobufs and setting up the Python
...
@@ -10,7 +10,7 @@ dependencies, compiling the configuration protobufs and setting up the Python
environment.
environment.
2.
A valid data set has been created. See
[
this page
](
preparing_inputs.md
)
for
2.
A valid data set has been created. See
[
this page
](
preparing_inputs.md
)
for
instructions on how to generate a dataset for the PASCAL VOC challenge or the
instructions on how to generate a dataset for the PASCAL VOC challenge or the
Oxford-IIT Pet dataset.
Oxford-II
I
T Pet dataset.
3.
An Object Detection pipeline configuration has been written. See
3.
An Object Detection pipeline configuration has been written. See
[
this page
](
configuring_jobs.md
)
for details on how to write a pipeline configuration.
[
this page
](
configuring_jobs.md
)
for details on how to write a pipeline configuration.
...
...
object_detection/g3doc/running_on_cloud.md
View file @
f906646c
...
@@ -11,7 +11,7 @@ See [the Cloud ML quick start guide](https://cloud.google.com/ml-engine/docs/qui
...
@@ -11,7 +11,7 @@ See [the Cloud ML quick start guide](https://cloud.google.com/ml-engine/docs/qui
in the
[
installation instructions
](
installation.md
)
.
in the
[
installation instructions
](
installation.md
)
.
3.
The reader has a valid data set and stored it in a Google Cloud Storage
3.
The reader has a valid data set and stored it in a Google Cloud Storage
bucket. See
[
this page
](
preparing_inputs.md
)
for instructions on how to generate
bucket. See
[
this page
](
preparing_inputs.md
)
for instructions on how to generate
a dataset for the PASCAL VOC challenge or the Oxford-IIT Pet dataset.
a dataset for the PASCAL VOC challenge or the Oxford-II
I
T Pet dataset.
4.
The reader has configured a valid Object Detection pipeline, and stored it
4.
The reader has configured a valid Object Detection pipeline, and stored it
in a Google Cloud Storage bucket. See
[
this page
](
configuring_jobs.md
)
for
in a Google Cloud Storage bucket. See
[
this page
](
configuring_jobs.md
)
for
details on how to write a pipeline configuration.
details on how to write a pipeline configuration.
...
@@ -88,7 +88,7 @@ training checkpoints and events will be written to and
...
@@ -88,7 +88,7 @@ training checkpoints and events will be written to and
Google Cloud Storage.
Google Cloud Storage.
Users can monitor the progress of their training job on the
[
ML Engine
Users can monitor the progress of their training job on the
[
ML Engine
Dasboard
](
https://pantheon.corp.google.com/mlengine/jobs
)
.
Das
h
board
](
https://pantheon.corp.google.com/mlengine/jobs
)
.
## Running an Evaluation Job on Cloud
## Running an Evaluation Job on Cloud
...
...
object_detection/g3doc/running_pets.md
View file @
f906646c
# Quick Start: Distributed Training on the Oxford-IIT Pets Dataset on Google Cloud
# Quick Start: Distributed Training on the Oxford-II
I
T Pets Dataset on Google Cloud
This page is a walkthrough for training an object detector using the Tensorflow
This page is a walkthrough for training an object detector using the Tensorflow
Object Detection API. In this tutorial, we'll be training on the Oxford-IIT Pets
Object Detection API. In this tutorial, we'll be training on the Oxford-II
I
T Pets
dataset to build a system to detect various breeds of cats and dogs. The output
dataset to build a system to detect various breeds of cats and dogs. The output
of the detector will look like the following:
of the detector will look like the following:
...
@@ -43,16 +43,16 @@ Please run through the [installation instructions](installation.md) to install
...
@@ -43,16 +43,16 @@ Please run through the [installation instructions](installation.md) to install
Tensorflow and all its dependencies. Ensure the Protobuf libraries are
Tensorflow and all its dependencies. Ensure the Protobuf libraries are
compiled and the library directories are added to
`PYTHONPATH`
.
compiled and the library directories are added to
`PYTHONPATH`
.
## Getting the Oxford-IIT Pets Dataset and Uploading it to Google Cloud Storage
## Getting the Oxford-II
I
T Pets Dataset and Uploading it to Google Cloud Storage
In order to train a detector, we require a dataset of images, bounding boxes and
In order to train a detector, we require a dataset of images, bounding boxes and
classifications. For this demo, we'll use the Oxford-IIT Pets dataset. The raw
classifications. For this demo, we'll use the Oxford-II
I
T Pets dataset. The raw
dataset for Oxford-IIT Pets lives
dataset for Oxford-II
I
T Pets lives
[
here
](
http://www.robots.ox.ac.uk/~vgg/data/pets/
)
. You will need to download
[
here
](
http://www.robots.ox.ac.uk/~vgg/data/pets/
)
. You will need to download
both the image dataset
[
`images.tar.gz`
](
http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz
)
both the image dataset
[
`images.tar.gz`
](
http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz
)
and the groundtruth data
[
`annotations.tar.gz`
](
http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz
)
and the groundtruth data
[
`annotations.tar.gz`
](
http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz
)
to the tensorflow/models directory. This may take some time. After downloading
to the
`
tensorflow/models
`
directory. This may take some time. After downloading
the tarballs, your object_detection directory should appear as follows:
the tarballs, your
`
object_detection
`
directory should appear as follows:
```
lang-none
```
lang-none
+ object_detection/
+ object_detection/
...
@@ -64,9 +64,9 @@ the tarballs, your object_detection directory should appear as follows:
...
@@ -64,9 +64,9 @@ the tarballs, your object_detection directory should appear as follows:
```
```
The Tensorflow Object Detection API expects data to be in the TFRecord format,
The Tensorflow Object Detection API expects data to be in the TFRecord format,
so we'll now run the
_
create_pet_tf_record
_
script to convert from the raw
so we'll now run the
`
create_pet_tf_record
`
script to convert from the raw
Oxford-IIT Pet dataset into TFRecords. Run the following commands from the
Oxford-II
I
T Pet dataset into TFRecords. Run the following commands from the
object_detection directory:
`
object_detection
`
directory:
```
bash
```
bash
# From tensorflow/models/
# From tensorflow/models/
...
@@ -83,12 +83,12 @@ python object_detection/create_pet_tf_record.py \
...
@@ -83,12 +83,12 @@ python object_detection/create_pet_tf_record.py \
Note: It is normal to see some warnings when running this script. You may ignore
Note: It is normal to see some warnings when running this script. You may ignore
them.
them.
Two TFRecord files named pet_train.record and pet_val.record should be generated
Two TFRecord files named
`
pet_train.record
`
and
`
pet_val.record
`
should be generated
in the object_detection
/
directory.
in the
`
object_detection
`
directory.
Now that the data has been generated, we'll need to upload it to Google Cloud
Now that the data has been generated, we'll need to upload it to Google Cloud
Storage so the data can be accessed by ML Engine. Run the following command to
Storage so the data can be accessed by ML Engine. Run the following command to
copy the files into your GCS bucket (substituting ${YOUR_GCS_BUCKET}):
copy the files into your GCS bucket (substituting
`
${YOUR_GCS_BUCKET}
`
):
```
bash
```
bash
# From tensorflow/models/
# From tensorflow/models/
...
@@ -109,7 +109,7 @@ parameters to initialize our new model.
...
@@ -109,7 +109,7 @@ parameters to initialize our new model.
Download our
[
COCO-pretrained Faster R-CNN with Resnet-101
Download our
[
COCO-pretrained Faster R-CNN with Resnet-101
model
](
http://storage.googleapis.com/download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_11_06_2017.tar.gz
)
.
model
](
http://storage.googleapis.com/download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_11_06_2017.tar.gz
)
.
Unzip the contents of the folder and copy the model.ckpt
*
files into your GCS
Unzip the contents of the folder and copy the
`
model.ckpt*
`
files into your GCS
Bucket.
Bucket.
```
bash
```
bash
...
@@ -127,14 +127,14 @@ In the Tensorflow Object Detection API, the model parameters, training
...
@@ -127,14 +127,14 @@ In the Tensorflow Object Detection API, the model parameters, training
parameters and eval parameters are all defined by a config file. More details
parameters and eval parameters are all defined by a config file. More details
can be found
[
here
](
configuring_jobs.md
)
. For this tutorial, we will use some
can be found
[
here
](
configuring_jobs.md
)
. For this tutorial, we will use some
predefined templates provided with the source code. In the
predefined templates provided with the source code. In the
object_detection/samples/configs folder, there are skeleton object_detection
`
object_detection/samples/configs
`
folder, there are skeleton object_detection
configuration files. We will use
`faster_rcnn_resnet101_pets.config`
as a
configuration files. We will use
`faster_rcnn_resnet101_pets.config`
as a
starting point for configuring the pipeline. Open the file with your favourite
starting point for configuring the pipeline. Open the file with your favourite
text editor.
text editor.
We'll need to configure some paths in order for the template to work. Search the
We'll need to configure some paths in order for the template to work. Search the
file for instances of
`PATH_TO_BE_CONFIGURED`
and replace them with the
file for instances of
`PATH_TO_BE_CONFIGURED`
and replace them with the
appropriate value (typically
"
gs://${YOUR_GCS_BUCKET}/data/
"
). Afterwards
appropriate value (typically
`
gs://${YOUR_GCS_BUCKET}/data/
`
). Afterwards
upload your edited file onto GCS, making note of the path it was uploaded to
upload your edited file onto GCS, making note of the path it was uploaded to
(we'll need it when starting the training/eval jobs).
(we'll need it when starting the training/eval jobs).
...
@@ -146,7 +146,7 @@ upload your edited file onto GCS, making note of the path it was uploaded to
...
@@ -146,7 +146,7 @@ upload your edited file onto GCS, making note of the path it was uploaded to
sed
-i
"s|PATH_TO_BE_CONFIGURED|"
gs://
${
YOUR_GCS_BUCKET
}
"/data|g"
\
sed
-i
"s|PATH_TO_BE_CONFIGURED|"
gs://
${
YOUR_GCS_BUCKET
}
"/data|g"
\
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
# Copy edit
t
ed template to cloud.
# Copy edited template to cloud.
gsutil
cp
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
\
gsutil
cp
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
\
gs://
${
YOUR_GCS_BUCKET
}
/data/faster_rcnn_resnet101_pets.config
gs://
${
YOUR_GCS_BUCKET
}
/data/faster_rcnn_resnet101_pets.config
```
```
...
@@ -171,7 +171,7 @@ the following:
...
@@ -171,7 +171,7 @@ the following:
```
```
You can inspect your bucket using the
[
Google Cloud Storage
You can inspect your bucket using the
[
Google Cloud Storage
browser
](
pantheon.corp
.google.com/storage
)
.
browser
](
https://console.cloud
.google.com/storage
/browser
)
.
## Starting Training and Evaluation Jobs on Google Cloud ML Engine
## Starting Training and Evaluation Jobs on Google Cloud ML Engine
...
@@ -181,7 +181,7 @@ Before we can start a job on Google Cloud ML Engine, we must:
...
@@ -181,7 +181,7 @@ Before we can start a job on Google Cloud ML Engine, we must:
2.
Write a cluster configuration for our Google Cloud ML job.
2.
Write a cluster configuration for our Google Cloud ML job.
To package the Tensorflow Object Detection code, run the following commands from
To package the Tensorflow Object Detection code, run the following commands from
the tensorflow/models/ directory:
the
`
tensorflow/models/
`
directory:
```
bash
```
bash
# From tensorflow/models/
# From tensorflow/models/
...
@@ -194,9 +194,9 @@ and `slim/dist/slim-0.1.tar.gz`.
...
@@ -194,9 +194,9 @@ and `slim/dist/slim-0.1.tar.gz`.
For running the training Cloud ML job, we'll configure the cluster to use 10
For running the training Cloud ML job, we'll configure the cluster to use 10
training jobs (1 master + 9 workers) and three parameter servers. The
training jobs (1 master + 9 workers) and three parameter servers. The
configuration file can be found at object_detection/samples/cloud/cloud.yml.
configuration file can be found at
`
object_detection/samples/cloud/cloud.yml
`
.
To start training, execute the following command from the tensorflow/models/
To start training, execute the following command from the
`
tensorflow/models/
`
directory:
directory:
```
bash
```
bash
...
@@ -233,7 +233,7 @@ submit training` command is correct. ML Engine does not distinguish between
...
@@ -233,7 +233,7 @@ submit training` command is correct. ML Engine does not distinguish between
training and evaluation jobs.
training and evaluation jobs.
Users can monitor and stop training and evaluation jobs on the
[
ML Engine
Users can monitor and stop training and evaluation jobs on the
[
ML Engine
Dasboard
](
https://
pantheon.corp
.google.com/mlengine/jobs
)
.
Das
h
board
](
https://
console.cloud
.google.com/mlengine/jobs
)
.
## Monitoring Progress with Tensorboard
## Monitoring Progress with Tensorboard
...
@@ -263,35 +263,35 @@ Note: It takes roughly 10 minutes for a job to get started on ML Engine, and
...
@@ -263,35 +263,35 @@ Note: It takes roughly 10 minutes for a job to get started on ML Engine, and
roughly an hour for the system to evaluate the validation dataset. It may take
roughly an hour for the system to evaluate the validation dataset. It may take
some time to populate the dashboards. If you do not see any entries after half
some time to populate the dashboards. If you do not see any entries after half
an hour, check the logs from the
[
ML Engine
an hour, check the logs from the
[
ML Engine
Dasboard
](
https://
pantheon.corp
.google.com/mlengine/jobs
)
.
Das
h
board
](
https://
console.cloud
.google.com/mlengine/jobs
)
.
## Exporting the Tensorflow Graph
## Exporting the Tensorflow Graph
After your model has been trained, you should export it to a Tensorflow
After your model has been trained, you should export it to a Tensorflow
graph proto. First, you need to identify a candidate checkpoint to export. You
graph proto. First, you need to identify a candidate checkpoint to export. You
can search your bucket using the
[
Google Cloud Storage
can search your bucket using the
[
Google Cloud Storage
Browser
](
https://
pantheon.corp
.google.com/storage/browser
)
. The file should be
Browser
](
https://
console.cloud
.google.com/storage/browser
)
. The file should be
stored under ${YOUR_GCS_BUCKET}/train. The checkpoint will typically consist of
stored under
`
${YOUR_GCS_BUCKET}/train
`
. The checkpoint will typically consist of
three files:
three files:
*
model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001
,
*
`
model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001
`
*
model.ckpt-${CHECKPOINT_NUMBER}.index
*
`
model.ckpt-${CHECKPOINT_NUMBER}.index
`
*
model.ckpt-${CHECKPOINT_NUMBER}.meta
*
`
model.ckpt-${CHECKPOINT_NUMBER}.meta
`
After you've identified a candidate checkpoint to export, run the following
After you've identified a candidate checkpoint to export, run the following
command from tensorflow/models/object_detection:
command from
`
tensorflow/models/object_detection
`
:
```
bash
```
bash
# From tensorflow/models
# From tensorflow/models
gsutil
cp
gs://
${
YOUR_GCS_BUCKET
}
/train/model.ckpt-
${
CHECKPOINT_NUMBER
}
.
*
.
gsutil
cp
gs://
${
YOUR_GCS_BUCKET
}
/train/model.ckpt-
${
CHECKPOINT_NUMBER
}
.
*
.
python object_detection/export_inference_graph
\
python object_detection/export_inference_graph
.py
\
--input_type
image_tensor
\
--input_type
image_tensor
\
--pipeline_config_path
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
\
--pipeline_config_path
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
\
--checkpoint_path
model.ckpt-
${
CHECKPOINT_NUMBER
}
\
--checkpoint_path
model.ckpt-
${
CHECKPOINT_NUMBER
}
\
--inference_graph_path
output_inference_graph.pb
--inference_graph_path
output_inference_graph.pb
```
```
Afterwards, you should see a graph named output_inference_graph.pb.
Afterwards, you should see a graph named
`
output_inference_graph.pb
`
.
## What's Next
## What's Next
...
...
object_detection/samples/configs/faster_rcnn_inception_resnet_v2_atrous_pets.config
View file @
f906646c
# Faster R-CNN with Inception Resnet v2, Atrous version;
# Faster R-CNN with Inception Resnet v2, Atrous version;
# Configured for Oxford-IIT Pets Dataset.
# Configured for Oxford-II
I
T Pets Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
...
...
object_detection/samples/configs/faster_rcnn_resnet101_pets.config
View file @
f906646c
# Faster R-CNN with Resnet-101 (v1) configured for the Oxford-IIT Pet Dataset.
# Faster R-CNN with Resnet-101 (v1) configured for the Oxford-II
I
T Pet Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
...
...
object_detection/samples/configs/faster_rcnn_resnet101_voc07.config
View file @
f906646c
...
@@ -118,9 +118,9 @@ train_config: {
...
@@ -118,9 +118,9 @@ train_config: {
train_input_reader
: {
train_input_reader
: {
tf_record_input_reader
{
tf_record_input_reader
{
input_path
:
"PATH_TO_BE_CONFIGURED/pascal_
voc_
train.record"
input_path
:
"PATH_TO_BE_CONFIGURED/pascal_train.record"
}
}
label_map_path
:
"PATH_TO_BE_CONFIGURED/pascal_
voc_
label_map.pbtxt"
label_map_path
:
"PATH_TO_BE_CONFIGURED/pascal_label_map.pbtxt"
}
}
eval_config
: {
eval_config
: {
...
@@ -129,7 +129,7 @@ eval_config: {
...
@@ -129,7 +129,7 @@ eval_config: {
eval_input_reader
: {
eval_input_reader
: {
tf_record_input_reader
{
tf_record_input_reader
{
input_path
:
"PATH_TO_BE_CONFIGURED/pascal_
voc_
val.record"
input_path
:
"PATH_TO_BE_CONFIGURED/pascal_val.record"
}
}
label_map_path
:
"PATH_TO_BE_CONFIGURED/pascal_
voc_
label_map.pbtxt"
label_map_path
:
"PATH_TO_BE_CONFIGURED/pascal_label_map.pbtxt"
}
}
Prev
1
2
3
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment