ModelZoo / ResNet50_tensorflow · Commits

Commit 117b5e07 (unverified)
Authored May 04, 2021 by vedanshu; committed by GitHub on May 04, 2021

Inherited DetectionModule from the beta.serving

parent a73d06ba
Changes: 1 changed file with 2 additions and 103 deletions

official/vision/beta/projects/deepmac_maskrcnn/serving/detection.py  (+2, -103)  @ 117b5e07
@@ -22,13 +22,9 @@ from official.vision.beta.projects.deepmac_maskrcnn.tasks import deep_mask_head_
 from official.vision.beta.ops import anchor
 from official.vision.beta.ops import preprocess_ops
 from official.vision.beta.projects.deepmac_maskrcnn.serving import export_base
+from official.vision.beta.serving import detection
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
 
-class DetectionModule(export_base.ExportModule):
+class DetectionModule(detection.DetectionModule):
   """Detection Module."""
 
   def _build_model(self):
@@ -48,100 +44,3 @@ class DetectionModule(export_base.ExportModule):
           type(self.params.task.model)))
 
     return model
-
-  def _build_inputs(self, image):
-    """Builds detection model inputs for serving."""
-    model_params = self.params.task.model
-    # Normalizes image with mean and std pixel values.
-    image = preprocess_ops.normalize_image(
-        image, offset=MEAN_RGB, scale=STDDEV_RGB)
-    image, image_info = preprocess_ops.resize_and_crop_image(
-        image,
-        self._input_image_size,
-        padded_size=preprocess_ops.compute_padded_size(
-            self._input_image_size, 2**model_params.max_level),
-        aug_scale_min=1.0,
-        aug_scale_max=1.0)
-
-    input_anchor = anchor.build_anchor_generator(
-        min_level=model_params.min_level,
-        max_level=model_params.max_level,
-        num_scales=model_params.anchor.num_scales,
-        aspect_ratios=model_params.anchor.aspect_ratios,
-        anchor_size=model_params.anchor.anchor_size)
-    anchor_boxes = input_anchor(
-        image_size=(self._input_image_size[0], self._input_image_size[1]))
-
-    return image, anchor_boxes, image_info
-
-  def serve(self, images: tf.Tensor):
-    """Cast image to float and run inference.
-
-    Args:
-      images: uint8 Tensor of shape [batch_size, None, None, 3]
-    Returns:
-      Tensor holding detection output logits.
-    """
-    model_params = self.params.task.model
-    with tf.device('cpu:0'):
-      images = tf.cast(images, dtype=tf.float32)
-
-      # Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
-      images_spec = tf.TensorSpec(shape=self._input_image_size + [3],
-                                  dtype=tf.float32)
-
-      num_anchors = model_params.anchor.num_scales * len(
-          model_params.anchor.aspect_ratios) * 4
-      anchor_shapes = []
-      for level in range(model_params.min_level, model_params.max_level + 1):
-        anchor_level_spec = tf.TensorSpec(
-            shape=[
-                self._input_image_size[0] // 2**level,
-                self._input_image_size[1] // 2**level, num_anchors
-            ],
-            dtype=tf.float32)
-        anchor_shapes.append((str(level), anchor_level_spec))
-
-      image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
-
-      images, anchor_boxes, image_info = tf.nest.map_structure(
-          tf.identity,
-          tf.map_fn(
-              self._build_inputs,
-              elems=images,
-              fn_output_signature=(images_spec, dict(anchor_shapes),
-                                   image_info_spec),
-              parallel_iterations=32))
-
-    input_image_shape = image_info[:, 1, :]
-
-    # To overcome keras.Model extra limitation to save a model with layers that
-    # have multiple inputs, we use `model.call` here to trigger the forward
-    # path. Note that, this disables some keras magics happens in `__call__`.
-    detections = self.model.call(
-        images=images,
-        image_shape=input_image_shape,
-        anchor_boxes=anchor_boxes,
-        training=False)
-
-    if self.params.task.model.detection_generator.apply_nms:
-      final_outputs = {
-          'detection_boxes': detections['detection_boxes'],
-          'detection_scores': detections['detection_scores'],
-          'detection_classes': detections['detection_classes'],
-          'num_detections': detections['num_detections']
-      }
-    else:
-      final_outputs = {
-          'decoded_boxes': detections['decoded_boxes'],
-          'decoded_box_scores': detections['decoded_box_scores'],
-          'cls_outputs': detections['cls_outputs'],
-          'box_outputs': detections['box_outputs']
-      }
-
-    if 'detection_masks' in detections.keys():
-      final_outputs['detection_masks'] = detections['detection_masks']
-
-    final_outputs.update({'image_info': image_info})
-
-    return final_outputs
\ No newline at end of file
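Net effect of this commit, as a minimal sketch rather than the verbatim resulting file: the project-specific serving module now inherits from the generic official.vision.beta.serving.detection.DetectionModule instead of export_base.ExportModule, so the copied MEAN_RGB/STDDEV_RGB constants and the duplicated _build_inputs/serve implementations are deleted here and picked up from the base class; only _build_model remains overridden in this file. The sketch below keeps only names that appear in the diff context above and elides the unchanged _build_model body.

    # Sketch of detection.py after this commit (body of _build_model elided;
    # imports and class line taken from the diff context above).
    from official.vision.beta.projects.deepmac_maskrcnn.serving import export_base
    from official.vision.beta.serving import detection


    class DetectionModule(detection.DetectionModule):
      """Detection Module."""

      def _build_model(self):
        # Builds the DeepMAC Mask R-CNN model from self.params.task.model;
        # this method is unchanged by the commit and omitted here.
        ...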