ModelZoo / ResNet50_tensorflow

Commit f97074b0
Authored Oct 06, 2020 by Abdullah Rashwan; committed by A. Unique TensorFlower on Oct 06, 2020

Internal change

PiperOrigin-RevId: 335692122
Parent: 92e115a1

5 changed files with 295 additions and 5 deletions (+295 −5)
official/vision/beta/serving/detection.py              +140  −0
official/vision/beta/serving/detection_test.py         +143  −0
official/vision/beta/serving/export_base.py              +3  −3
official/vision/beta/serving/export_saved_model.py       +8  −1
official/vision/beta/serving/image_classification.py     +1  −1
official/vision/beta/serving/detection.py  (new file, 0 → 100644)
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection input and model functions for serving/inference."""
import tensorflow as tf

from official.vision.beta import configs
from official.vision.beta.modeling import factory
from official.vision.beta.ops import anchor
from official.vision.beta.ops import preprocess_ops
from official.vision.beta.serving import export_base


MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
class DetectionModule(export_base.ExportModule):
  """Detection Module."""

  def build_model(self):
    if self._batch_size is None:
      raise ValueError("batch_size can't be None for detection models")
    if not self._params.task.model.detection_generator.use_batched_nms:
      raise ValueError('Only batched_nms is supported.')
    input_specs = tf.keras.layers.InputSpec(
        shape=[self._batch_size] + self._input_image_size + [3])

    if isinstance(self._params.task.model, configs.maskrcnn.MaskRCNN):
      self._model = factory.build_maskrcnn(
          input_specs=input_specs, model_config=self._params.task.model)
    elif isinstance(self._params.task.model, configs.retinanet.RetinaNet):
      self._model = factory.build_retinanet(
          input_specs=input_specs, model_config=self._params.task.model)
    else:
      raise ValueError('Detection module not implemented for {} model.'.format(
          type(self._params.task.model)))

    return self._model
  def _build_inputs(self, image):
    """Builds detection model inputs for serving."""
    model_params = self._params.task.model
    # Normalizes image with mean and std pixel values.
    image = preprocess_ops.normalize_image(
        image, offset=MEAN_RGB, scale=STDDEV_RGB)

    image, image_info = preprocess_ops.resize_and_crop_image(
        image,
        self._input_image_size,
        padded_size=preprocess_ops.compute_padded_size(
            self._input_image_size, 2**model_params.max_level),
        aug_scale_min=1.0,
        aug_scale_max=1.0)
    image_shape = image_info[1, :]  # Shape of original image.

    input_anchor = anchor.build_anchor_generator(
        min_level=model_params.min_level,
        max_level=model_params.max_level,
        num_scales=model_params.anchor.num_scales,
        aspect_ratios=model_params.anchor.aspect_ratios,
        anchor_size=model_params.anchor.anchor_size)
    anchor_boxes = input_anchor(
        image_size=(self._input_image_size[0], self._input_image_size[1]))

    return image, anchor_boxes, image_shape
  def _run_inference_on_image_tensors(self, images: tf.Tensor):
    """Casts images to float and runs detection inference.

    Args:
      images: uint8 Tensor of shape [batch_size, None, None, 3].

    Returns:
      A dictionary holding detection outputs: detection_boxes,
      detection_scores, detection_classes, num_detections, and
      detection_masks when the model produces them.
    """
    model_params = self._params.task.model
    with tf.device('cpu:0'):
      images = tf.cast(images, dtype=tf.float32)

      # Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
      images_spec = tf.TensorSpec(
          shape=self._input_image_size + [3], dtype=tf.float32)

      num_anchors = model_params.anchor.num_scales * len(
          model_params.anchor.aspect_ratios) * 4
      anchor_shapes = []
      for level in range(model_params.min_level, model_params.max_level + 1):
        anchor_level_spec = tf.TensorSpec(
            shape=[
                self._input_image_size[0] // 2**level,
                self._input_image_size[1] // 2**level, num_anchors
            ],
            dtype=tf.float32)
        anchor_shapes.append((str(level), anchor_level_spec))

      image_shape_spec = tf.TensorSpec(shape=[2,], dtype=tf.float32)

      images, anchor_boxes, image_shape = tf.nest.map_structure(
          tf.identity,
          tf.map_fn(
              self._build_inputs,
              elems=images,
              fn_output_signature=(images_spec, dict(anchor_shapes),
                                   image_shape_spec),
              parallel_iterations=32))

    detections = self._model.call(
        images=images,
        image_shape=image_shape,
        anchor_boxes=anchor_boxes,
        training=False)

    final_outputs = {
        'detection_boxes': detections['detection_boxes'],
        'detection_scores': detections['detection_scores'],
        'detection_classes': detections['detection_classes'],
        'num_detections': detections['num_detections']
    }
    if 'detection_masks' in detections.keys():
      final_outputs.update(
          {'detection_masks': detections['detection_masks']})

    return final_outputs
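For reference, the sketch below shows how this module can be turned into a SavedModel; it mirrors detection_test.py and export_saved_model.py further down in this commit. The experiment name, batch size, 640x640 input size, and export path are illustrative assumptions, and no checkpoint is restored, so the exported weights are untrained.

# Minimal export sketch (not part of this commit); it mirrors detection_test.py.
# 'retinanet_resnetfpn_coco', the 640x640 input size, and the export path are
# illustrative assumptions.
import tensorflow as tf

from official.common import registry_imports  # pylint: disable=unused-import
from official.core import exp_factory
from official.vision.beta.serving import detection

params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
# DetectionModule only supports batched NMS (see build_model above).
params.task.model.detection_generator.use_batched_nms = True

module = detection.DetectionModule(
    params, batch_size=1, input_image_size=[640, 640])
module.build_model()

# Serve uint8 image tensors of shape [1, 640, 640, 3].
input_signature = tf.TensorSpec(shape=[1, 640, 640, 3], dtype=tf.uint8)
signatures = {
    'serving_default':
        module.inference_from_image_tensors.get_concrete_function(
            input_signature)
}
tf.saved_model.save(module, '/tmp/retinanet_export', signatures=signatures)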
official/vision/beta/serving/detection_test.py  (new file, 0 → 100644)
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for image classification export lib."""
import io
import os

from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf

from official.common import registry_imports  # pylint: disable=unused-import
from official.core import exp_factory
from official.vision.beta.serving import detection
class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):

  def _get_detection_module(self, experiment_name):
    params = exp_factory.get_exp_config(experiment_name)
    params.task.model.backbone.resnet.model_id = 18
    params.task.model.detection_generator.use_batched_nms = True
    detection_module = detection.DetectionModule(
        params, batch_size=1, input_image_size=[640, 640])
    return detection_module

  def _export_from_module(self, module, input_type, batch_size,
                          save_directory):
    if input_type == 'image_tensor':
      input_signature = tf.TensorSpec(
          shape=[batch_size, 640, 640, 3], dtype=tf.uint8)
      signatures = {
          'serving_default':
              module.inference_from_image_tensors.get_concrete_function(
                  input_signature)
      }
    elif input_type == 'image_bytes':
      input_signature = tf.TensorSpec(shape=[batch_size], dtype=tf.string)
      signatures = {
          'serving_default':
              module.inference_from_image_bytes.get_concrete_function(
                  input_signature)
      }
    elif input_type == 'tf_example':
      input_signature = tf.TensorSpec(shape=[batch_size], dtype=tf.string)
      signatures = {
          'serving_default':
              module.inference_from_tf_example.get_concrete_function(
                  input_signature)
      }
    else:
      raise ValueError('Unrecognized `input_type`')

    tf.saved_model.save(module, save_directory, signatures=signatures)

  def _get_dummy_input(self, input_type, batch_size):
    """Get dummy input for the given input type."""
    if input_type == 'image_tensor':
      return tf.zeros((batch_size, 640, 640, 3), dtype=np.uint8)
    elif input_type == 'image_bytes':
      image = Image.fromarray(np.zeros((640, 640, 3), dtype=np.uint8))
      byte_io = io.BytesIO()
      image.save(byte_io, 'PNG')
      return [byte_io.getvalue() for b in range(batch_size)]
    elif input_type == 'tf_example':
      image_tensor = tf.zeros((640, 640, 3), dtype=tf.uint8)
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
      example = tf.train.Example(
          features=tf.train.Features(
              feature={
                  'image/encoded':
                      tf.train.Feature(
                          bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
              })).SerializeToString()
      return [example for b in range(batch_size)]
  @parameterized.parameters(
      ('image_tensor', 'fasterrcnn_resnetfpn_coco'),
      ('image_bytes', 'fasterrcnn_resnetfpn_coco'),
      ('tf_example', 'fasterrcnn_resnetfpn_coco'),
      ('image_tensor', 'maskrcnn_resnetfpn_coco'),
      ('image_bytes', 'maskrcnn_resnetfpn_coco'),
      ('tf_example', 'maskrcnn_resnetfpn_coco'),
      ('image_tensor', 'retinanet_resnetfpn_coco'),
      ('image_bytes', 'retinanet_resnetfpn_coco'),
      ('tf_example', 'retinanet_resnetfpn_coco'),
  )
  def test_export(self, input_type, experiment_name):
    tmp_dir = self.get_temp_dir()
    batch_size = 1
    module = self._get_detection_module(experiment_name)
    model = module.build_model()

    self._export_from_module(module, input_type, batch_size, tmp_dir)

    self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
    self.assertTrue(
        os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
    self.assertTrue(
        os.path.exists(
            os.path.join(tmp_dir, 'variables',
                         'variables.data-00000-of-00001')))

    imported = tf.saved_model.load(tmp_dir)
    detection_fn = imported.signatures['serving_default']

    images = self._get_dummy_input(input_type, batch_size)

    processed_images, anchor_boxes, image_shape = module._build_inputs(
        tf.zeros((224, 224, 3), dtype=tf.uint8))
    processed_images = tf.expand_dims(processed_images, 0)
    image_shape = tf.expand_dims(image_shape, 0)
    for l, l_boxes in anchor_boxes.items():
      anchor_boxes[l] = tf.expand_dims(l_boxes, 0)

    expected_outputs = model(
        images=processed_images,
        image_shape=image_shape,
        anchor_boxes=anchor_boxes,
        training=False)
    outputs = detection_fn(tf.constant(images))

    self.assertAllClose(outputs['num_detections'].numpy(),
                        expected_outputs['num_detections'].numpy())


if __name__ == '__main__':
  tf.test.main()
official/vision/beta/serving/export_base.py

@@ -62,7 +62,7 @@ class ExportModule(tf.Module, metaclass=abc.ABCMeta):
 
   @tf.function
   def inference_from_image_tensors(self, input_tensor):
-    return dict(outputs=self._run_inference_on_image_tensors(input_tensor))
+    return self._run_inference_on_image_tensors(input_tensor)
 
   @tf.function
   def inference_from_image_bytes(self, input_tensor):

@@ -76,7 +76,7 @@ class ExportModule(tf.Module, metaclass=abc.ABCMeta):
             shape=self._input_image_size + [3], dtype=tf.uint8),
         parallel_iterations=32))
     images = tf.stack(images)
-    return dict(outputs=self._run_inference_on_image_tensors(images))
+    return self._run_inference_on_image_tensors(images)
 
   @tf.function
   def inference_from_tf_example(self, input_tensor):

@@ -91,4 +91,4 @@ class ExportModule(tf.Module, metaclass=abc.ABCMeta):
             dtype=tf.uint8,
         parallel_iterations=32))
     images = tf.stack(images)
-    return dict(outputs=self._run_inference_on_image_tensors(images))
+    return self._run_inference_on_image_tensors(images)
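The net effect of these three edits is that inference_from_image_tensors, inference_from_image_bytes, and inference_from_tf_example now pass through whatever the subclass's _run_inference_on_image_tensors returns: DetectionModule exposes several named tensors, while ClassificationModule (last file below) wraps its logits itself. As a hedged consumption sketch, assuming a detection SavedModel exported as in detection_test.py (the path, input size, and shape comments are assumptions):

# Hedged sketch: consume a detection SavedModel exported as in
# detection_test.py. The path, input size, and shape comments are assumptions.
import tensorflow as tf

imported = tf.saved_model.load('/tmp/retinanet_export')
detect_fn = imported.signatures['serving_default']

images = tf.zeros((1, 640, 640, 3), dtype=tf.uint8)
outputs = detect_fn(images)

# Detection signatures now return named tensors directly instead of a single
# dict(outputs=...) wrapper.
boxes = outputs['detection_boxes']      # e.g. [batch, max_detections, 4]
scores = outputs['detection_scores']    # e.g. [batch, max_detections]
classes = outputs['detection_classes']  # e.g. [batch, max_detections]
num = outputs['num_detections']         # e.g. [batch]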
official/vision/beta/serving/export_saved_model.py

@@ -47,6 +47,7 @@ from official.core import exp_factory
 from official.core import train_utils
 from official.modeling import hyperparams
 from official.vision.beta import configs
+from official.vision.beta.serving import detection
 from official.vision.beta.serving import image_classification
 
 FLAGS = flags.FLAGS

@@ -105,6 +106,12 @@ def export_inference_graph(input_type, batch_size, input_image_size, params,
         params=params,
         batch_size=batch_size,
         input_image_size=input_image_size)
+  elif isinstance(params.task, configs.retinanet.RetinaNetTask) or isinstance(
+      params.task, configs.maskrcnn.MaskRCNNTask):
+    export_module = detection.DetectionModule(
+        params=params,
+        batch_size=batch_size,
+        input_image_size=input_image_size)
   else:
     raise ValueError('Export module not implemented for {} task.'.format(
         type(params.task)))

@@ -124,7 +131,7 @@ def export_inference_graph(input_type, batch_size, input_image_size, params,
           dtype=tf.uint8)
     signatures = {
         'serving_default':
-            export_module.inference_from_image.get_concrete_function(
+            export_module.inference_from_image_tensors.get_concrete_function(
                 input_signature)
     }
   elif input_type == 'image_bytes':
official/vision/beta/serving/image_classification.py

@@ -80,4 +80,4 @@ class ClassificationModule(export_base.ExportModule):
 
     logits = self._model(images, training=False)
 
-    return logits
+    return dict(outputs=logits)
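For symmetry with the detection sketch above, this is how the classification signature now looks to a client. The export path and 224x224 input size are illustrative assumptions, and the signature is assumed to have been built from inference_from_image_tensors as in export_saved_model.py.

# Hedged sketch: consume a classification SavedModel exported with a
# 'serving_default' signature built from inference_from_image_tensors.
# The path and input size are illustrative assumptions.
import tensorflow as tf

imported = tf.saved_model.load('/tmp/resnet_classifier_export')
classify_fn = imported.signatures['serving_default']

images = tf.zeros((1, 224, 224, 3), dtype=tf.uint8)
# ClassificationModule now wraps its logits itself, so they come back
# under the 'outputs' key.
logits = classify_fn(images)['outputs']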