Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
60ac72b5
Commit
60ac72b5
authored
May 04, 2021
by
vedanshu
Browse files
deepmac export script added
parent
8f58f396
Changes
4
Show whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
511 additions
and
0 deletions
+511
-0
official/vision/beta/projects/deepmac_maskrcnn/serving/detection.py
...ision/beta/projects/deepmac_maskrcnn/serving/detection.py
+147
-0
official/vision/beta/projects/deepmac_maskrcnn/serving/export_base.py
...ion/beta/projects/deepmac_maskrcnn/serving/export_base.py
+171
-0
official/vision/beta/projects/deepmac_maskrcnn/serving/export_saved_model.py
...a/projects/deepmac_maskrcnn/serving/export_saved_model.py
+100
-0
official/vision/beta/projects/deepmac_maskrcnn/serving/export_saved_model_lib.py
...ojects/deepmac_maskrcnn/serving/export_saved_model_lib.py
+93
-0
No files found.
official/vision/beta/projects/deepmac_maskrcnn/serving/detection.py
0 → 100644
View file @
60ac72b5
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Detection input and model functions for serving/inference."""
import
tensorflow
as
tf
from
official.vision.beta.projects.deepmac_maskrcnn
import
configs
from
official.vision.beta.projects.deepmac_maskrcnn.tasks
import
deep_mask_head_rcnn
from
official.vision.beta.ops
import
anchor
from
official.vision.beta.ops
import
preprocess_ops
from
official.vision.beta.projects.deepmac_maskrcnn.serving
import
export_base
# ImageNet per-channel mean and standard deviation (RGB order), expressed as
# fractions of full scale and then mapped onto the [0, 255] pixel range used
# by the serving preprocessing below.
_IMAGENET_MEAN_FRACTION = (0.485, 0.456, 0.406)
_IMAGENET_STDDEV_FRACTION = (0.229, 0.224, 0.225)

MEAN_RGB = tuple(value * 255 for value in _IMAGENET_MEAN_FRACTION)
STDDEV_RGB = tuple(value * 255 for value in _IMAGENET_STDDEV_FRACTION)
class DetectionModule(export_base.ExportModule):
  """Detection Module.

  Wraps a DeepMaskHeadRCNN model for export: builds the model from the
  experiment params, preprocesses a batch of uint8 images on CPU, runs the
  forward pass, and returns detection outputs (plus masks when present).
  """

  def _build_model(self):
    """Builds the Mask R-CNN model declared in `self.params.task.model`.

    Returns:
      A tf.keras.Model built by `deep_mask_head_rcnn.build_maskrcnn`.

    Raises:
      ValueError: If `batch_size` is None, if batched NMS is not enabled, or
        if the model config is not a `DeepMaskHeadRCNN` config.
    """
    # Bug fix: the original constructed ValueError(...) without `raise`, so
    # both validations below were silently skipped.
    if self._batch_size is None:
      raise ValueError("batch_size can't be None for detection models")
    if not self.params.task.model.detection_generator.use_batched_nms:
      raise ValueError('Only batched_nms is supported.')

    input_specs = tf.keras.layers.InputSpec(
        shape=[self._batch_size] + self._input_image_size + [3])

    if isinstance(self.params.task.model,
                  configs.deep_mask_head_rcnn.DeepMaskHeadRCNN):
      model = deep_mask_head_rcnn.build_maskrcnn(
          input_specs=input_specs, model_config=self.params.task.model)
    else:
      raise ValueError('Detection module not implemented for {} model.'.format(
          type(self.params.task.model)))
    return model

  def _build_inputs(self, image):
    """Builds detection model inputs for serving.

    Args:
      image: A float image tensor; presumably [height, width, 3] — the spec in
        `serve` fixes the output rank. TODO(review): confirm channel order.

    Returns:
      A tuple of (normalized and resized image, anchor boxes keyed by level,
      image_info tensor from `resize_and_crop_image`).
    """
    model_params = self.params.task.model
    # Normalizes image with mean and std pixel values.
    image = preprocess_ops.normalize_image(
        image, offset=MEAN_RGB, scale=STDDEV_RGB)
    # Resize to the serving size, padding to a multiple of the coarsest
    # feature stride (2**max_level) so FPN levels divide evenly.
    image, image_info = preprocess_ops.resize_and_crop_image(
        image,
        self._input_image_size,
        padded_size=preprocess_ops.compute_padded_size(
            self._input_image_size, 2**model_params.max_level),
        aug_scale_min=1.0,
        aug_scale_max=1.0)
    input_anchor = anchor.build_anchor_generator(
        min_level=model_params.min_level,
        max_level=model_params.max_level,
        num_scales=model_params.anchor.num_scales,
        aspect_ratios=model_params.anchor.aspect_ratios,
        anchor_size=model_params.anchor.anchor_size)
    anchor_boxes = input_anchor(
        image_size=(self._input_image_size[0], self._input_image_size[1]))
    return image, anchor_boxes, image_info

  def serve(self, images: tf.Tensor):
    """Cast image to float and run inference.

    Args:
      images: uint8 Tensor of shape [batch_size, None, None, 3]

    Returns:
      Tensor holding detection output logits.
    """
    model_params = self.params.task.model
    # Input preprocessing is pinned to CPU so only the model runs on the
    # accelerator.
    with tf.device('cpu:0'):
      images = tf.cast(images, dtype=tf.float32)

      # Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
      images_spec = tf.TensorSpec(
          shape=self._input_image_size + [3], dtype=tf.float32)
      # Per-location anchor channel count: scales * aspect ratios * 4 coords.
      num_anchors = model_params.anchor.num_scales * len(
          model_params.anchor.aspect_ratios) * 4
      anchor_shapes = []
      for level in range(model_params.min_level, model_params.max_level + 1):
        # Feature map at `level` is the input downsampled by 2**level.
        anchor_level_spec = tf.TensorSpec(
            shape=[
                self._input_image_size[0] // 2**level,
                self._input_image_size[1] // 2**level,
                num_anchors,
            ],
            dtype=tf.float32)
        anchor_shapes.append((str(level), anchor_level_spec))
      image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)

      images, anchor_boxes, image_info = tf.nest.map_structure(
          tf.identity,
          tf.map_fn(
              self._build_inputs,
              elems=images,
              fn_output_signature=(images_spec, dict(anchor_shapes),
                                   image_info_spec),
              parallel_iterations=32))

    # Row 1 of image_info holds the resized (model-input) image shape.
    input_image_shape = image_info[:, 1, :]

    # To overcome keras.Model extra limitation to save a model with layers that
    # have multiple inputs, we use `model.call` here to trigger the forward
    # path. Note that, this disables some keras magics happens in `__call__`.
    detections = self.model.call(
        images=images,
        image_shape=input_image_shape,
        anchor_boxes=anchor_boxes,
        training=False)

    if self.params.task.model.detection_generator.apply_nms:
      final_outputs = {
          'detection_boxes': detections['detection_boxes'],
          'detection_scores': detections['detection_scores'],
          'detection_classes': detections['detection_classes'],
          'num_detections': detections['num_detections']
      }
    else:
      # NMS disabled: expose the raw decoded boxes and head outputs instead.
      final_outputs = {
          'decoded_boxes': detections['decoded_boxes'],
          'decoded_box_scores': detections['decoded_box_scores'],
          'cls_outputs': detections['cls_outputs'],
          'box_outputs': detections['box_outputs']
      }
    if 'detection_masks' in detections.keys():
      final_outputs['detection_masks'] = detections['detection_masks']
    final_outputs.update({'image_info': image_info})
    return final_outputs
\ No newline at end of file
official/vision/beta/projects/deepmac_maskrcnn/serving/export_base.py
0 → 100644
View file @
60ac72b5
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Base class for model export."""
import
abc
from
typing
import
Dict
,
List
,
Mapping
,
Optional
,
Text
import
tensorflow
as
tf
from
official.core
import
export_base
from
official.modeling.hyperparams
import
config_definitions
as
cfg
class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
  """Base Export Module.

  Provides input decoding (raw tensors, encoded image bytes, TF Examples),
  tf.function inference entry points, and serving-signature construction.
  Subclasses implement `_build_model` and `serve`.
  """

  def __init__(self,
               params: cfg.ExperimentConfig,
               *,
               batch_size: int,
               input_image_size: List[int],
               num_channels: int = 3,
               model: Optional[tf.keras.Model] = None):
    """Initializes a module for export.

    Args:
      params: Experiment params.
      batch_size: The batch size of the model input. Can be `int` or None.
      input_image_size: List or Tuple of size of the input image. For 2D image,
        it is [height, width].
      num_channels: The number of the image channels.
      model: A tf.keras.Model instance to be exported. If None, the model is
        built via `_build_model()`.
    """
    self.params = params
    self._batch_size = batch_size
    self._input_image_size = input_image_size
    self._num_channels = num_channels
    if model is None:
      model = self._build_model()  # pylint: disable=assignment-from-none
    super().__init__(params=params, model=model)

  def _decode_image(self, encoded_image_bytes: str) -> tf.Tensor:
    """Decodes an image bytes to an image tensor.

    Use `tf.image.decode_image` to decode an image if input is expected to be 2D
    image; otherwise use `tf.io.decode_raw` to convert the raw bytes to tensor
    and reshape it to desire shape.

    Args:
      encoded_image_bytes: An encoded image string to be decoded.

    Returns:
      A decoded image tensor.
    """
    if len(self._input_image_size) == 2:
      # Decode an image if 2D input is expected.
      image_tensor = tf.image.decode_image(
          encoded_image_bytes, channels=self._num_channels)
      # decode_image returns an unknown spatial shape; pin the channel dim.
      image_tensor.set_shape((None, None, self._num_channels))
    else:
      # Convert raw bytes into a tensor and reshape it, if not 2D input.
      image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
      image_tensor = tf.reshape(
          image_tensor, self._input_image_size + [self._num_channels])
    return image_tensor

  def _decode_tf_example(
      self, tf_example_string_tensor: tf.train.Example) -> tf.Tensor:
    """Decodes a TF Example to an image tensor.

    Args:
      tf_example_string_tensor: A tf.train.Example of encoded image and other
        information.

    Returns:
      A decoded image tensor.
    """
    keys_to_features = {'image/encoded': tf.io.FixedLenFeature((), tf.string)}
    parsed_tensors = tf.io.parse_single_example(
        serialized=tf_example_string_tensor, features=keys_to_features)
    image_tensor = self._decode_image(parsed_tensors['image/encoded'])
    return image_tensor

  def _build_model(self, **kwargs):
    """Returns a model built from the params. Subclasses override this."""
    return None

  @tf.function
  def inference_from_image_tensors(
      self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
    """Runs `serve` directly on an already-decoded image tensor batch."""
    return self.serve(inputs)

  @tf.function
  def inference_from_image_bytes(self, inputs: tf.Tensor):
    """Decodes a batch of encoded image strings, then runs `serve`."""
    with tf.device('cpu:0'):
      images = tf.nest.map_structure(
          tf.identity,
          tf.map_fn(
              self._decode_image,
              elems=inputs,
              fn_output_signature=tf.TensorSpec(
                  shape=[None] * len(self._input_image_size) +
                  [self._num_channels],
                  dtype=tf.uint8),
              parallel_iterations=32))
      images = tf.stack(images)
    return self.serve(images)

  @tf.function
  def inference_from_tf_example(
      self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
    """Parses serialized tf.train.Examples into images, then runs `serve`."""
    with tf.device('cpu:0'):
      images = tf.nest.map_structure(
          tf.identity,
          tf.map_fn(
              self._decode_tf_example,
              elems=inputs,
              # Height/width of the shape of input images is unspecified (None)
              # at the time of decoding the example, but the shape will
              # be adjusted to conform to the input layer of the model,
              # by _run_inference_on_image_tensors() below.
              fn_output_signature=tf.TensorSpec(
                  shape=[None] * len(self._input_image_size) +
                  [self._num_channels],
                  dtype=tf.uint8),
              # Bug fix: the original also passed `dtype=tf.uint8`;
              # `tf.map_fn` treats `dtype` as a deprecated alias of
              # `fn_output_signature` and raises ValueError when both are
              # specified, so the deprecated duplicate is removed.
              parallel_iterations=32))
      images = tf.stack(images)
    return self.serve(images)

  def get_inference_signatures(self, function_keys: Dict[Text, Text]):
    """Gets defined function signatures.

    Args:
      function_keys: A dictionary with keys as the function to create signature
        for and values as the signature keys when returns.

    Returns:
      A dictionary with key as signature key and value as concrete functions
      that can be used for tf.saved_model.save.

    Raises:
      ValueError: If a key is not one of `image_tensor`, `image_bytes`,
        `tf_example` / `serve_examples`.
    """
    signatures = {}
    for key, def_name in function_keys.items():
      if key == 'image_tensor':
        input_signature = tf.TensorSpec(
            shape=[self._batch_size] + [None] * len(self._input_image_size) +
            [self._num_channels],
            dtype=tf.uint8)
        signatures[
            def_name] = self.inference_from_image_tensors.get_concrete_function(
                input_signature)
      elif key == 'image_bytes':
        input_signature = tf.TensorSpec(
            shape=[self._batch_size], dtype=tf.string)
        signatures[
            def_name] = self.inference_from_image_bytes.get_concrete_function(
                input_signature)
      elif key == 'serve_examples' or key == 'tf_example':
        input_signature = tf.TensorSpec(
            shape=[self._batch_size], dtype=tf.string)
        signatures[
            def_name] = self.inference_from_tf_example.get_concrete_function(
                input_signature)
      else:
        raise ValueError('Unrecognized `input_type`')
    return signatures
\ No newline at end of file
official/vision/beta/projects/deepmac_maskrcnn/serving/export_saved_model.py
0 → 100644
View file @
60ac72b5
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r
"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
CONFIG_FILE_PATH = XX
export_saved_model --export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--config_file=${CONFIG_FILE_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from
absl
import
app
from
absl
import
flags
from
official.vision.beta.projects.deepmac_maskrcnn.tasks
import
deep_mask_head_rcnn
from
official.core
import
exp_factory
from
official.modeling
import
hyperparams
from
official.vision.beta.projects.deepmac_maskrcnn.serving
import
export_saved_model_lib
FLAGS = flags.FLAGS

# Experiment selection and output locations.
flags.DEFINE_string(
    name='experiment',
    default='deep_mask_head_rcnn_resnetfpn_coco',
    help='experiment type, e.g. retinanet_resnetfpn_coco')
flags.DEFINE_string(
    name='export_dir', default=None, help='The export directory.')
flags.DEFINE_string(
    name='checkpoint_path', default=None, help='Checkpoint path.')

# Config overrides, applied in order: config_file first, then params_override.
flags.DEFINE_multi_string(
    'config_file',
    default=None,
    help='YAML/JSON files which specifies overrides. The override order '
    'follows the order of args. Note that each file '
    'can be used as an override template to override the default parameters '
    'specified in Python. If the same parameter is specified in both '
    '`--config_file` and `--params_override`, `config_file` will be used '
    'first, followed by params_override.')
flags.DEFINE_string(
    name='params_override',
    default='',
    help='The JSON/YAML file or string which specifies the parameter to be '
    'overriden on top of `config_file` template.')

# Serving input description.
flags.DEFINE_integer(name='batch_size', default=None, help='The batch size.')
flags.DEFINE_string(
    name='input_type',
    default='image_tensor',
    help='One of `image_tensor`, `image_bytes`, `tf_example`.')
flags.DEFINE_string(
    name='input_image_size',
    default='224,224',
    help='The comma-separated string of two integers representing the '
    'height,width of the input to the model.')
def main(_):
  """Builds the experiment config from flags and exports the saved model."""
  params = exp_factory.get_exp_config(FLAGS.experiment)

  # Layer overrides on top of the registered experiment: every config file in
  # order, then the params_override string/file.
  for override_file in FLAGS.config_file or []:
    params = hyperparams.override_params_dict(
        params, override_file, is_strict=True)
  if FLAGS.params_override:
    params = hyperparams.override_params_dict(
        params, FLAGS.params_override, is_strict=True)

  params.validate()
  params.lock()

  height_width = [int(dim) for dim in FLAGS.input_image_size.split(',')]
  export_saved_model_lib.export_inference_graph(
      input_type=FLAGS.input_type,
      batch_size=FLAGS.batch_size,
      input_image_size=height_width,
      params=params,
      checkpoint_path=FLAGS.checkpoint_path,
      export_dir=FLAGS.export_dir,
      export_checkpoint_subdir='checkpoint',
      export_saved_model_subdir='saved_model')


if __name__ == '__main__':
  app.run(main)
\ No newline at end of file
official/vision/beta/projects/deepmac_maskrcnn/serving/export_saved_model_lib.py
0 → 100644
View file @
60ac72b5
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r
"""Vision models export utility function for serving/inference."""
import
os
from
typing
import
Optional
,
List
import
tensorflow
as
tf
from
official.core
import
config_definitions
as
cfg
from
official.core
import
export_base
from
official.core
import
train_utils
from
official.vision.beta.projects.deepmac_maskrcnn
import
configs
from
official.vision.beta.projects.deepmac_maskrcnn.serving
import
detection
def export_inference_graph(
    input_type: str,
    batch_size: Optional[int],
    input_image_size: List[int],
    params: cfg.ExperimentConfig,
    checkpoint_path: str,
    export_dir: str,
    num_channels: Optional[int] = 3,
    export_module: Optional[export_base.ExportModule] = None,
    export_checkpoint_subdir: Optional[str] = None,
    export_saved_model_subdir: Optional[str] = None):
  """Exports inference graph for the model specified in the exp config.

  Saved model is stored at export_dir/saved_model, checkpoint is saved
  at export_dir/checkpoint, and params is saved at export_dir/params.yaml.

  Args:
    input_type: One of `image_tensor`, `image_bytes`, `tf_example`.
    batch_size: 'int', or None.
    input_image_size: List or Tuple of height and width.
    params: Experiment params.
    checkpoint_path: Trained checkpoint path or directory.
    export_dir: Export directory path.
    num_channels: The number of input image channels.
    export_module: Optional export module to be used instead of using params
      to create one. If None, the params will be used to create an export
      module.
    export_checkpoint_subdir: Optional subdirectory under export_dir
      to store checkpoint.
    export_saved_model_subdir: Optional subdirectory under export_dir
      to store saved model.

  Raises:
    ValueError: If no export module is given and the task config is not a
      `DeepMaskHeadRCNNTask`.
  """
  # Resolve output locations: optional subdirectories nest under export_dir,
  # otherwise everything is written to export_dir itself.
  output_checkpoint_directory = (
      os.path.join(export_dir, export_checkpoint_subdir)
      if export_checkpoint_subdir else export_dir)
  output_saved_model_directory = (
      os.path.join(export_dir, export_saved_model_subdir)
      if export_saved_model_subdir else export_dir)

  # TODO(arashwan): Offers a direct path to use ExportModule with Task objects.
  if not export_module:
    if isinstance(params.task,
                  configs.deep_mask_head_rcnn.DeepMaskHeadRCNNTask):
      export_module = detection.DetectionModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          num_channels=num_channels)
    else:
      raise ValueError('Export module not implemented for {} task.'.format(
          type(params.task)))

  # Write the saved model (untimestamped, restored from checkpoint_path).
  export_base.export(
      export_module,
      function_keys=[input_type],
      export_savedmodel_dir=output_saved_model_directory,
      checkpoint_path=checkpoint_path,
      timestamped=False)

  # Also snapshot the restored weights as a fresh object-based checkpoint.
  ckpt = tf.train.Checkpoint(model=export_module.model)
  ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))

  # Persist the experiment params next to the artifacts (params.yaml).
  train_utils.serialize_config(params, export_dir)
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment