ModelZoo / ResNet50_tensorflow · Commits

Commit 1f5a5e9d (unverified)
Authored Aug 27, 2019 by Hongkun Yu; committed by GitHub on Aug 27, 2019

revert reverted changes... (#7503)

Parent: 5ba3c3f5
Showing 9 changed files with 115 additions and 101 deletions (+115 −101)
research/lstm_object_detection/export_tflite_lstd_graph.py          +10 −6
research/lstm_object_detection/export_tflite_lstd_graph_lib.py      +22 −24
research/lstm_object_detection/export_tflite_lstd_model.py          +31 −28
research/lstm_object_detection/g3doc/exporting_models.md            +13 −13
research/lstm_object_detection/test_tflite_model.py                 +21 −18
research/lstm_object_detection/tflite/BUILD                         +9 −2
research/lstm_object_detection/tflite/WORKSPACE                     +0 −6
research/lstm_object_detection/tflite/mobile_lstd_tflite_client.cc  +5 −0
research/lstm_object_detection/tflite/mobile_ssd_tflite_client.h    +4 −4
research/lstm_object_detection/export_tflite_lstd_graph.py
```diff
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
 r"""Exports an LSTM detection model to use with tf-lite.
-
 Outputs file:
@@ -86,8 +85,9 @@ python lstm_object_detection/export_tflite_lstd_graph.py \
 """
 import tensorflow as tf
-from lstm_object_detection.utils import config_util
 from lstm_object_detection import export_tflite_lstd_graph_lib
+from lstm_object_detection.utils import config_util
+
 flags = tf.app.flags
 flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
@@ -122,12 +122,16 @@ def main(argv):
   flags.mark_flag_as_required('trained_checkpoint_prefix')
-  pipeline_config = config_util.get_configs_from_pipeline_file(FLAGS.pipeline_config_path)
-  export_tflite_lstd_graph_lib.export_tflite_graph(pipeline_config,
-      FLAGS.trained_checkpoint_prefix, FLAGS.output_directory,
-      FLAGS.add_postprocessing_op, FLAGS.max_detections,
-      FLAGS.max_classes_per_detection, use_regular_nms=FLAGS.use_regular_nms)
+  pipeline_config = config_util.get_configs_from_pipeline_file(
+      FLAGS.pipeline_config_path)
+  export_tflite_lstd_graph_lib.export_tflite_graph(
+      pipeline_config,
+      FLAGS.trained_checkpoint_prefix,
+      FLAGS.output_directory,
+      FLAGS.add_postprocessing_op,
+      FLAGS.max_detections,
+      FLAGS.max_classes_per_detection,
+      use_regular_nms=FLAGS.use_regular_nms)

 if __name__ == '__main__':
```
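For orientation, here is what the reworked `main()` boils down to when the library entry point is called directly. This is a minimal sketch only: the paths and numeric values are hypothetical placeholders, not part of the commit.

```python
# Hedged sketch of the call main() makes after this change.
# All paths and numeric values below are hypothetical placeholders.
from lstm_object_detection import export_tflite_lstd_graph_lib
from lstm_object_detection.utils import config_util

pipeline_config = config_util.get_configs_from_pipeline_file(
    '/tmp/pipeline.config')          # hypothetical config path
export_tflite_lstd_graph_lib.export_tflite_graph(
    pipeline_config,
    '/tmp/train/model.ckpt',         # hypothetical checkpoint prefix
    '/tmp/export',                   # hypothetical output directory
    True,                            # add_postprocessing_op
    10,                              # max_detections (example value)
    1,                               # max_classes_per_detection (example value)
    use_regular_nms=False)
```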
research/lstm_object_detection/export_tflite_lstd_graph_lib.py
```diff
@@ -12,26 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
 r"""Exports detection models to use with tf-lite.
 See export_tflite_lstd_graph.py for usage.
 """
 import os
 import tempfile
 import numpy as np
 import tensorflow as tf
 from tensorflow.core.framework import attr_value_pb2
 from tensorflow.core.framework import types_pb2
 from tensorflow.core.protobuf import saver_pb2
 from tensorflow.tools.graph_transforms import TransformGraph
-from lstm_object_detection import model_builder
 from object_detection import exporter
 from object_detection.builders import graph_rewriter_builder
 from object_detection.builders import post_processing_builder
 from object_detection.core import box_list
+from lstm_object_detection import model_builder

 _DEFAULT_NUM_CHANNELS = 3
 _DEFAULT_NUM_COORD_BOX = 4
@@ -84,11 +84,11 @@ def append_postprocessing_op(frozen_graph_def,
     num_classes: number of classes in SSD detector
     scale_values: scale values is a dict with following key-value pairs
       {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode
-        centersize boxes
+      centersize boxes
     detections_per_class: In regular NonMaxSuppression, number of anchors used
-        for NonMaxSuppression per class
-    use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
-        Fast NMS.
+      for NonMaxSuppression per class
+    use_regular_nms: Flag to set postprocessing op to use Regular NMS instead
+      of Fast NMS.

   Returns:
     transformed_graph_def: Frozen GraphDef with postprocessing custom op
@@ -165,9 +165,9 @@ def export_tflite_graph(pipeline_config,
   is written to output_dir/tflite_graph.pb.

   Args:
-    pipeline_config: Dictionary of configuration objects. Keys are `model`,
-      `train_config`,
-      `train_input_config`, `eval_config`, `eval_input_config`, `lstm_model`.
-      Value are the corresponding config objects.
+    pipeline_config: Dictionary of configuration objects. Keys are `model`,
+      `train_config`, `train_input_config`, `eval_config`,
+      `eval_input_config`, `lstm_model`. Value are the corresponding config
+      objects.
     trained_checkpoint_prefix: a file prefix for the checkpoint containing the
       trained parameters of the SSD model.
     output_dir: A directory to write the tflite graph and anchor file to.
@@ -176,9 +176,9 @@ def export_tflite_graph(pipeline_config,
     max_detections: Maximum number of detections (boxes) to show
     max_classes_per_detection: Number of classes to display per detection
     detections_per_class: In regular NonMaxSuppression, number of anchors used
-        for NonMaxSuppression per class
-    use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
-        Fast NMS.
+      for NonMaxSuppression per class
+    use_regular_nms: Flag to set postprocessing op to use Regular NMS instead
+      of Fast NMS.
     binary_graph_name: Name of the exported graph file in binary format.
     txt_graph_name: Name of the exported graph file in text format.
@@ -197,12 +197,10 @@ def export_tflite_graph(pipeline_config,
   num_classes = model_config.ssd.num_classes
   nms_score_threshold = {
-      model_config.ssd.post_processing.batch_non_max_suppression.
-      score_threshold
+      model_config.ssd.post_processing.batch_non_max_suppression.score_threshold
   }
   nms_iou_threshold = {
-      model_config.ssd.post_processing.batch_non_max_suppression.
-      iou_threshold
+      model_config.ssd.post_processing.batch_non_max_suppression.iou_threshold
   }
   scale_values = {}
   scale_values['y_scale'] = {
@@ -226,7 +224,7 @@ def export_tflite_graph(pipeline_config,
     width = image_resizer_config.fixed_shape_resizer.width
     if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
       num_channels = 1
     #TODO(richardbrks) figure out how to make with a None defined batch size
     shape = [lstm_config.eval_unroll_length, height, width, num_channels]
   else:
     raise ValueError(
@@ -235,14 +233,14 @@ def export_tflite_graph(pipeline_config,
         image_resizer_config.WhichOneof('image_resizer_oneof')))
   video_tensor = tf.placeholder(
       tf.float32, shape=shape, name='input_video_tensor')
   detection_model = model_builder.build(
       model_config, lstm_config, is_training=False)
   preprocessed_video, true_image_shapes = detection_model.preprocess(
       tf.to_float(video_tensor))
   predicted_tensors = detection_model.predict(preprocessed_video,
                                               true_image_shapes)
   # predicted_tensors = detection_model.postprocess(predicted_tensors,
   #                                                 true_image_shapes)
   # The score conversion occurs before the post-processing custom op
@@ -311,7 +309,7 @@ def export_tflite_graph(pipeline_config,
       initializer_nodes='')
   # Add new operation to do post processing in a custom op (TF Lite only)
   #(richardbrks) Do use this or detection_model.postprocess?
   if add_postprocessing_op:
     transformed_graph_def = append_postprocessing_op(frozen_graph_def,
         max_detections, max_classes_per_detection,
```
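The `y_scale`/`x_scale`/`h_scale`/`w_scale` values in the docstring above feed the standard SSD centersize box decoding. The following is a minimal NumPy sketch of that decoding under the usual Faster R-CNN box-coder convention; it is illustrative only and not code from this commit.

```python
import numpy as np

def decode_centersize_box(encoded, anchor, scales=(10.0, 10.0, 5.0, 5.0)):
  """Decodes one [ty, tx, th, tw] box against an anchor [ya, xa, ha, wa]."""
  ty, tx, th, tw = encoded
  ya, xa, ha, wa = anchor
  y_scale, x_scale, h_scale, w_scale = scales
  ycenter = ty / y_scale * ha + ya   # shift the anchor center
  xcenter = tx / x_scale * wa + xa
  h = np.exp(th / h_scale) * ha      # rescale the anchor size
  w = np.exp(tw / w_scale) * wa
  # Return [ymin, xmin, ymax, xmax].
  return [ycenter - h / 2., xcenter - w / 2.,
          ycenter + h / 2., xcenter + w / 2.]
```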
research/lstm_object_detection/export_tflite_lstd_model.py
```diff
@@ -13,6 +13,8 @@
 # limitations under the License.
 # ==============================================================================
 """Export a LSTD model in tflite format."""
+import os
+
 from absl import flags
 import tensorflow as tf
@@ -29,34 +31,35 @@ FLAGS = flags.FLAGS
 def main(_):
   flags.mark_flag_as_required('export_path')
   flags.mark_flag_as_required('frozen_graph_path')
   flags.mark_flag_as_required('pipeline_config_path')

   configs = config_util.get_configs_from_pipeline_file(
       FLAGS.pipeline_config_path)
   lstm_config = configs['lstm_model']

   input_arrays = ['input_video_tensor']
   output_arrays = [
       'TFLite_Detection_PostProcess',
       'TFLite_Detection_PostProcess:1',
       'TFLite_Detection_PostProcess:2',
       'TFLite_Detection_PostProcess:3',
   ]
   input_shapes = {
       'input_video_tensor': [lstm_config.eval_unroll_length, 320, 320, 3],
   }

   converter = tf.lite.TFLiteConverter.from_frozen_graph(
       FLAGS.frozen_graph_path, input_arrays, output_arrays,
       input_shapes=input_shapes)
   converter.allow_custom_ops = True
   tflite_model = converter.convert()
   ofilename = os.path.join(FLAGS.export_path)
-  open(ofilename, "wb").write(tflite_model)
+  open(ofilename, 'wb').write(tflite_model)


 if __name__ == '__main__':
   tf.app.run()
```
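Stripped of flag handling, the conversion this script performs reduces to the call below (TF 1.x API, matching the script). The frozen-graph path, output path, and unroll length of 4 are hypothetical placeholders:

```python
import tensorflow as tf  # TF 1.x, matching the script above

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    '/tmp/export/tflite_graph.pb',   # hypothetical frozen-graph path
    ['input_video_tensor'],
    ['TFLite_Detection_PostProcess',
     'TFLite_Detection_PostProcess:1',
     'TFLite_Detection_PostProcess:2',
     'TFLite_Detection_PostProcess:3'],
    input_shapes={'input_video_tensor': [4, 320, 320, 3]})  # unroll length assumed 4
converter.allow_custom_ops = True  # detection post-processing is a custom op
open('/tmp/model.tflite', 'wb').write(converter.convert())
```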
research/lstm_object_detection/g3doc/exporting_models.md
````diff
 # Exporting a tflite model from a checkpoint

-Starting from a trained model checkpoint, creating a tflite model requires 2 steps:
+Starting from a trained model checkpoint, creating a tflite model requires 2
+steps:

 * exporting a tflite frozen graph from a checkpoint
 * exporting a tflite model from a frozen graph

 ## Exporting a tflite frozen graph from a checkpoint
@@ -20,14 +20,14 @@ python lstm_object_detection/export_tflite_lstd_graph.py \
     --pipeline_config_path ${PIPELINE_CONFIG_PATH} \
     --trained_checkpoint_prefix ${TRAINED_CKPT_PREFIX} \
     --output_directory ${EXPORT_DIR} \
     --add_preprocessing_op
 ```
-After export, you should see the directory ${EXPORT_DIR} containing the following files:
+After export, you should see the directory ${EXPORT_DIR} containing the
+following files:

 * `tflite_graph.pb`
 * `tflite_graph.pbtxt`

 ## Exporting a tflite model from a frozen graph
@@ -40,10 +40,10 @@ FROZEN_GRAPH_PATH={path to exported tflite_graph.pb}
 EXPORT_PATH={path to filename that will be used for export}
 PIPELINE_CONFIG_PATH={path to pipeline config}

 python lstm_object_detection/export_tflite_lstd_model.py \
     --export_path ${EXPORT_PATH} \
     --frozen_graph_path ${FROZEN_GRAPH_PATH} \
     --pipeline_config_path ${PIPELINE_CONFIG_PATH}
 ```
-After export, you should see the file ${EXPORT_PATH} containing the FlatBuffer
-model to be used by an application.
\ No newline at end of file
+After export, you should see the file ${EXPORT_PATH} containing the FlatBuffer
+model to be used by an application.
````
research/lstm_object_detection/test_tflite_model.py
```diff
@@ -13,6 +13,9 @@
 # limitations under the License.
 # ==============================================================================
 """Test a tflite model using random input data."""
+from __future__ import print_function
+
+from absl import flags
 import numpy as np
 import tensorflow as tf
@@ -23,28 +26,28 @@ FLAGS = flags.FLAGS
 def main(_):
   flags.mark_flag_as_required('model_path')

   # Load TFLite model and allocate tensors.
   interpreter = tf.lite.Interpreter(model_path=FLAGS.model_path)
   interpreter.allocate_tensors()

   # Get input and output tensors.
   input_details = interpreter.get_input_details()
-  print 'input_details:', input_details
+  print('input_details:', input_details)
   output_details = interpreter.get_output_details()
-  print 'output_details:', output_details
+  print('output_details:', output_details)

   # Test model on random input data.
   input_shape = input_details[0]['shape']
   # change the following line to feed into your own data.
   input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
   interpreter.set_tensor(input_details[0]['index'], input_data)

   interpreter.invoke()
   output_data = interpreter.get_tensor(output_details[0]['index'])
-  print output_data
+  print(output_data)


 if __name__ == '__main__':
   tf.app.run()
```
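A note on the four outputs the test script prints: by the convention of the `TFLite_Detection_PostProcess` custom op they are boxes, classes, scores, and detection count, in that order. A hedged sketch of unpacking them, assuming the `interpreter` and `output_details` set up above:

```python
# Unpack the four post-processing outputs (order follows the op's convention).
boxes = interpreter.get_tensor(output_details[0]['index'])    # [1, max_detections, 4]
classes = interpreter.get_tensor(output_details[1]['index'])  # [1, max_detections]
scores = interpreter.get_tensor(output_details[2]['index'])   # [1, max_detections]
count = interpreter.get_tensor(output_details[3]['index'])    # [1]
```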
research/lstm_object_detection/tflite/BUILD
```diff
@@ -59,12 +59,19 @@ cc_library(
     name = "mobile_lstd_tflite_client",
     srcs = ["mobile_lstd_tflite_client.cc"],
     hdrs = ["mobile_lstd_tflite_client.h"],
+    defines = select({
+        "//conditions:default": [],
+        "enable_edgetpu": ["ENABLE_EDGETPU"],
+    }),
     deps = [
         ":mobile_ssd_client",
         ":mobile_ssd_tflite_client",
-        "@com_google_absl//absl/base:core_headers",
         "@com_google_glog//:glog",
+        "@com_google_absl//absl/base:core_headers",
         "@org_tensorflow//tensorflow/lite/kernels:builtin_ops",
-    ],
+    ] + select({
+        "//conditions:default": [],
+        "enable_edgetpu": ["@libedgetpu//libedgetpu:header"],
+    }),
     alwayslink = 1,
 )
```
research/lstm_object_detection/tflite/WORKSPACE
```diff
@@ -90,12 +90,6 @@ http_archive(
     sha256 = "79d102c61e2a479a0b7e5fc167bcfaa4832a0c6aad4a75fa7da0480564931bcc",
 )

-#
-# http_archive(
-#     name = "com_google_protobuf",
-#     strip_prefix = "protobuf-master",
-#     urls = ["https://github.com/protocolbuffers/protobuf/archive/master.zip"],
-# )

 # Needed by TensorFlow
 http_archive(
```
research/lstm_object_detection/tflite/mobile_lstd_tflite_client.cc
```diff
@@ -66,6 +66,11 @@ bool MobileLSTDTfLiteClient::InitializeInterpreter(
     interpreter_->UseNNAPI(false);
   }

+#ifdef ENABLE_EDGETPU
+  interpreter_->SetExternalContext(kTfLiteEdgeTpuContext,
+                                   edge_tpu_context_.get());
+#endif
+
   // Inputs are: normalized_input_image_tensor, raw_inputs/init_lstm_c,
   // raw_inputs/init_lstm_h
   if (interpreter_->inputs().size() != 3) {
```
research/lstm_object_detection/tflite/mobile_ssd_tflite_client.h
```diff
@@ -76,6 +76,10 @@ class MobileSSDTfLiteClient : public MobileSSDClient {
   std::unique_ptr<::tflite::MutableOpResolver> resolver_;
   std::unique_ptr<::tflite::Interpreter> interpreter_;

+#ifdef ENABLE_EDGETPU
+  std::unique_ptr<edgetpu::EdgeTpuContext> edge_tpu_context_;
+#endif
+
  private:
   // MobileSSDTfLiteClient is neither copyable nor movable.
   MobileSSDTfLiteClient(const MobileSSDTfLiteClient&) = delete;
@@ -103,10 +107,6 @@ class MobileSSDTfLiteClient : public MobileSSDClient {
   bool FloatInference(const uint8_t* input_data);
   bool QuantizedInference(const uint8_t* input_data);
   void GetOutputBoxesAndScoreTensorsFromUInt8();
-
-#ifdef ENABLE_EDGETPU
-  std::unique_ptr<edgetpu::EdgeTpuContext> edge_tpu_context_;
-#endif
 };

 }  // namespace tflite
```