Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
7fb0abb2
Commit
7fb0abb2
authored
Apr 11, 2022
by
A. Unique TensorFlower
Browse files
Internal change
PiperOrigin-RevId: 441033117
parent
3046cda7
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
364 additions
and
0 deletions
+364
-0
official/vision/beta/projects/yolo/serving/export_module_factory.py
...ision/beta/projects/yolo/serving/export_module_factory.py
+175
-0
official/vision/beta/projects/yolo/serving/export_saved_model.py
...l/vision/beta/projects/yolo/serving/export_saved_model.py
+107
-0
official/vision/beta/projects/yolo/serving/model_fn.py
official/vision/beta/projects/yolo/serving/model_fn.py
+82
-0
No files found.
official/vision/beta/projects/yolo/serving/export_module_factory.py
0 → 100644
View file @
7fb0abb2
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for YOLO export modules."""
from
typing
import
List
,
Optional
import
tensorflow
as
tf
from
official.core
import
config_definitions
as
cfg
from
official.vision.beta
import
configs
from
official.vision.beta.dataloaders
import
classification_input
from
official.vision.beta.modeling
import
factory
from
official.vision.beta.projects.yolo.configs.yolo
import
YoloTask
from
official.vision.beta.projects.yolo.modeling
import
factory
as
yolo_factory
from
official.vision.beta.projects.yolo.modeling.backbones
import
darknet
# pylint: disable=unused-import
from
official.vision.beta.projects.yolo.modeling.decoders
import
yolo_decoder
# pylint: disable=unused-import
from
official.vision.beta.projects.yolo.serving
import
model_fn
as
yolo_model_fn
from
official.vision.beta.serving
import
export_base_v2
as
export_base
from
official.vision.beta.serving
import
export_utils
def create_classification_export_module(
    params: cfg.ExperimentConfig,
    input_type: str,
    batch_size: int,
    input_image_size: List[int],
    num_channels: int = 3) -> export_base.ExportModule:
  """Creates classification export module."""
  signatures = export_utils.get_image_input_signatures(
      input_type, batch_size, input_image_size, num_channels)
  specs = tf.keras.layers.InputSpec(
      shape=[batch_size, *input_image_size, num_channels])

  classifier = factory.build_classification_model(
      input_specs=specs,
      model_config=params.task.model,
      l2_regularizer=None)

  def _preprocess(inputs):
    parsed = export_utils.parse_image(
        inputs, input_type, input_image_size, num_channels)
    # If input_type is `tflite`, do not apply image preprocessing.
    if input_type == 'tflite':
      return parsed

    def _per_image(single_image):
      return classification_input.Parser.inference_fn(
          single_image, input_image_size, num_channels)

    out_spec = tf.TensorSpec(
        shape=input_image_size + [num_channels], dtype=tf.float32)
    return tf.map_fn(_per_image, elems=parsed, fn_output_signature=out_spec)

  def _postprocess(logits):
    # Expose both the raw logits and their softmax probabilities.
    return {'logits': logits, 'probs': tf.nn.softmax(logits)}

  return export_base.ExportModule(
      params,
      model=classifier,
      input_signature=signatures,
      preprocessor=_preprocess,
      postprocessor=_postprocess)
def create_yolo_export_module(
    params: cfg.ExperimentConfig,
    input_type: str,
    batch_size: int,
    input_image_size: List[int],
    num_channels: int = 3) -> export_base.ExportModule:
  """Creates YOLO export module.

  Args:
    params: Experiment config providing the YOLO task/model definitions.
    input_type: One of the serving input types understood by
      `export_utils.parse_image` (e.g. `image_tensor`, `tflite`).
    batch_size: Static batch size for the exported signature.
    input_image_size: `[height, width]` of the model input.
    num_channels: Number of input image channels. Defaults to 3.

  Returns:
    An `export_base.ExportModule` wrapping the built YOLO model.
  """
  input_signature = export_utils.get_image_input_signatures(
      input_type, batch_size, input_image_size, num_channels)
  input_specs = tf.keras.layers.InputSpec(
      shape=[batch_size] + input_image_size + [num_channels])
  model, _ = yolo_factory.build_yolo(
      input_specs=input_specs,
      model_config=params.task.model,
      l2_regularization=None)

  def preprocess_fn(inputs):
    image_tensor = export_utils.parse_image(inputs, input_type,
                                            input_image_size, num_channels)
    # If input_type is `tflite`, do not apply image preprocessing.
    if input_type == 'tflite':
      return image_tensor

    def preprocess_image_fn(inputs):
      # Normalize to [0, 1] then letterbox to the serving resolution.
      image = tf.cast(inputs, dtype=tf.float32)
      image = image / 255.
      (image, image_info) = yolo_model_fn.letterbox(
          image,
          input_image_size,
          letter_box=params.task.validation_data.parser.letter_box)
      return image, image_info

    # Fix: use `num_channels` rather than a hard-coded 3 so the output spec
    # stays consistent with the function's signature (and with
    # create_classification_export_module).
    images_spec = tf.TensorSpec(
        shape=input_image_size + [num_channels], dtype=tf.float32)
    image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)

    images, image_info = tf.nest.map_structure(
        tf.identity,
        tf.map_fn(
            preprocess_image_fn,
            elems=image_tensor,
            fn_output_signature=(images_spec, image_info_spec),
            parallel_iterations=32))
    return images, image_info

  def inference_steps(inputs, model):
    images, image_info = inputs
    detection = model(images, training=False)
    # Map boxes from letterboxed coordinates back onto the original image.
    detection['bbox'] = yolo_model_fn.undo_info(
        detection['bbox'],
        detection['num_detections'],
        image_info,
        expand=False)

    final_outputs = {
        'detection_boxes': detection['bbox'],
        'detection_scores': detection['confidence'],
        'detection_classes': detection['classes'],
        'num_detections': detection['num_detections']
    }
    return final_outputs

  export_module = export_base.ExportModule(
      params,
      model=model,
      input_signature=input_signature,
      preprocessor=preprocess_fn,
      inference_step=inference_steps)
  return export_module
def get_export_module(params: cfg.ExperimentConfig,
                      input_type: str,
                      batch_size: Optional[int],
                      input_image_size: List[int],
                      num_channels: int = 3) -> export_base.ExportModule:
  """Factory for export modules."""
  task = params.task
  # Pick the builder matching the configured task type.
  if isinstance(task, configs.image_classification.ImageClassificationTask):
    builder = create_classification_export_module
  elif isinstance(task, YoloTask):
    builder = create_yolo_export_module
  else:
    raise ValueError(
        'Export module not implemented for {} task.'.format(type(task)))
  return builder(params, input_type, batch_size, input_image_size,
                 num_channels)
official/vision/beta/projects/yolo/serving/export_saved_model.py
0 → 100644
View file @
7fb0abb2
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r
"""YOLO model export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
CONFIG_FILE_PATH = XX
export_saved_model --export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--config_file=${CONFIG_FILE_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from
absl
import
app
from
absl
import
flags
from
official.core
import
exp_factory
from
official.modeling
import
hyperparams
from
official.vision.beta.projects.yolo.configs
import
yolo
as
cfg
# pylint: disable=unused-import
from
official.vision.beta.projects.yolo.serving
import
export_module_factory
from
official.vision.beta.projects.yolo.tasks
import
yolo
as
task
# pylint: disable=unused-import
from
official.vision.serving
import
export_saved_model_lib
FLAGS = flags.FLAGS

# Command-line flags for the export binary.
# Name of a registered experiment config (see exp_factory).
flags.DEFINE_string('experiment', 'scaled_yolo',
                    'experiment type, e.g. scaled_yolo')
# Destination directory for the exported SavedModel.
flags.DEFINE_string('export_dir', None, 'The export directory.')
# Trained checkpoint to load weights from.
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
    'config_file',
    default=None,
    help='YAML/JSON files which specifies overrides. The override order '
    'follows the order of args. Note that each file '
    'can be used as an override template to override the default parameters '
    'specified in Python. If the same parameter is specified in both '
    '`--config_file` and `--params_override`, `config_file` will be used '
    'first, followed by params_override.')
flags.DEFINE_string(
    'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overriden'
    ' on top of `config_file` template.')
# Static batch size baked into the exported serving signature.
flags.DEFINE_integer('batch_size', 1, 'The batch size.')
flags.DEFINE_string('input_type', 'image_tensor',
                    'One of `image_tensor`, `image_bytes`, `tf_example`.')
# Parsed as "height,width" in main().
flags.DEFINE_string(
    'input_image_size', '224,224',
    'The comma-separated string of two integers representing the height,width '
    'of the input to the model.')
def main(_):
  """Builds the export module from flags and writes the SavedModel.

  Reads the experiment config named by `--experiment`, applies
  `--config_file` / `--params_override` overrides, then exports the
  inference graph to `--export_dir`.
  """
  params = exp_factory.get_exp_config(FLAGS.experiment)
  for config_file in FLAGS.config_file or []:
    params = hyperparams.override_params_dict(
        params, config_file, is_strict=True)
  if FLAGS.params_override:
    params = hyperparams.override_params_dict(
        params, FLAGS.params_override, is_strict=True)

  params.validate()
  params.lock()

  # Parse "height,width" once and reuse it below (the original code
  # re-parsed the flag a second time inline).
  input_image_size = [int(x) for x in FLAGS.input_image_size.split(',')]

  export_module = export_module_factory.get_export_module(
      params=params,
      input_type=FLAGS.input_type,
      batch_size=FLAGS.batch_size,
      input_image_size=input_image_size,
      num_channels=3)

  export_saved_model_lib.export_inference_graph(
      input_type=FLAGS.input_type,
      batch_size=FLAGS.batch_size,
      input_image_size=input_image_size,
      params=params,
      checkpoint_path=FLAGS.checkpoint_path,
      export_dir=FLAGS.export_dir,
      export_module=export_module)
if __name__ == '__main__':
  # absl entry point: parses command-line flags, then invokes main().
  app.run(main)
official/vision/beta/projects/yolo/serving/model_fn.py
0 → 100644
View file @
7fb0abb2
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO input and model functions for serving/inference."""
from
typing
import
List
,
Tuple
import
tensorflow
as
tf
from
official.vision.beta.projects.yolo.ops
import
preprocessing_ops
from
official.vision.ops
import
box_ops
def letterbox(image: tf.Tensor,
              desired_size: List[int],
              letter_box: bool = True) -> Tuple[tf.Tensor, tf.Tensor]:
  """Letter box an image for image serving.

  Resizes `image` to `desired_size`. When `letter_box` is True the aspect
  ratio is preserved: the image is scaled by the limiting axis and centered
  on the output canvas; otherwise it is stretched to `desired_size`.

  Args:
    image: Image tensor to resize (HWC; shape read via
      `preprocessing_ops.get_image_shape` — assumed rank 3, TODO confirm).
    desired_size: `[height, width]` of the output image.
    letter_box: Whether to preserve aspect ratio by scale-and-pad instead of
      stretching.

  Returns:
    output_image: The resized (and possibly padded) image.
    image_info: A `[4, 2]` float32 tensor stacking, in order: the original
      image size, the desired size, the per-axis scale, and the offset —
      consumed by `undo_info` to map boxes back to the original image.
  """
  with tf.name_scope('letter_box'):
    image_size = tf.cast(preprocessing_ops.get_image_shape(image), tf.float32)
    scaled_size = tf.cast(desired_size, image_size.dtype)
    if letter_box:
      # Scale by the limiting axis so the whole image fits inside
      # `desired_size`, then snap to whole pixels.
      scale = tf.minimum(scaled_size[0] / image_size[0],
                         scaled_size[1] / image_size[1])
      scaled_size = tf.round(image_size * scale)
    # NOTE(review): the original code assigned `scale = 1.0` in an `else`
    # branch; the value was never read, so the dead assignment is removed.

    # Computes 2D image_scale.
    image_scale = scaled_size / image_size
    # Pad offset centering the scaled image on the output canvas.
    image_offset = tf.cast((desired_size - scaled_size) * 0.5, tf.int32)
    offset = (scaled_size - desired_size) * 0.5
    scaled_image = tf.image.resize(
        image, tf.cast(scaled_size, tf.int32), method='nearest')
    output_image = tf.image.pad_to_bounding_box(scaled_image, image_offset[0],
                                                image_offset[1],
                                                desired_size[0],
                                                desired_size[1])

    image_info = tf.stack([
        image_size,
        tf.cast(desired_size, dtype=tf.float32),
        image_scale,
        tf.cast(offset, tf.float32)
    ])
    return output_image, image_info
def undo_info(boxes: tf.Tensor,
              num_detections: int,
              info: tf.Tensor,
              expand: bool = True) -> tf.Tensor:
  """Clip and normalize boxes for serving.

  Maps predicted boxes from the letterboxed input frame back onto the
  original image, using the `image_info` tensor produced by `letterbox`
  (rows: original size, input size, scale, offset).
  """
  # Zero out box rows beyond the number of valid detections.
  valid = tf.sequence_mask(num_detections, maxlen=tf.shape(boxes)[1])
  boxes = boxes * tf.cast(tf.expand_dims(valid, axis=-1), boxes.dtype)

  if expand:
    # Add a batch dimension when `info` is unbatched.
    info = tf.cast(tf.expand_dims(info, axis=0), boxes.dtype)

  original_shape = tf.expand_dims(info[:, 0, :], axis=1)
  input_shape = tf.expand_dims(info[:, 1, :], axis=1)
  scale = tf.expand_dims(info[:, 2, :], axis=1)
  offset = tf.expand_dims(info[:, 3, :], axis=1)

  # Undo letterboxing: to absolute input coords, shift, unscale.
  boxes = box_ops.denormalize_boxes(boxes, input_shape)
  boxes = boxes + tf.tile(offset, [1, 1, 2])
  boxes = boxes / tf.tile(scale, [1, 1, 2])

  # Clip to the original image and renormalize against it.
  boxes = box_ops.clip_boxes(boxes, original_shape)
  return box_ops.normalize_boxes(boxes, original_shape)
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment