Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
c57e975a
Commit
c57e975a
authored
Nov 29, 2021
by
saberkun
Browse files
Merge pull request #10338 from srihari-humbarwadi:readme
PiperOrigin-RevId: 413033276
parents
7fb4f3cd
acf4156e
Changes
291
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
989 additions
and
73 deletions
+989
-73
official/vision/beta/evaluation/coco_utils.py
official/vision/beta/evaluation/coco_utils.py
+25
-10
official/vision/beta/evaluation/coco_utils_test.py
official/vision/beta/evaluation/coco_utils_test.py
+49
-0
official/vision/beta/evaluation/iou_test.py
official/vision/beta/evaluation/iou_test.py
+16
-0
official/vision/beta/evaluation/panoptic_quality_evaluator_test.py
...vision/beta/evaluation/panoptic_quality_evaluator_test.py
+12
-6
official/vision/beta/evaluation/segmentation_metrics.py
official/vision/beta/evaluation/segmentation_metrics.py
+9
-14
official/vision/beta/evaluation/segmentation_metrics_test.py
official/vision/beta/evaluation/segmentation_metrics_test.py
+77
-0
official/vision/beta/losses/focal_loss.py
official/vision/beta/losses/focal_loss.py
+0
-0
official/vision/beta/losses/loss_utils.py
official/vision/beta/losses/loss_utils.py
+0
-0
official/vision/beta/modeling/backbones/__init__.py
official/vision/beta/modeling/backbones/__init__.py
+1
-0
official/vision/beta/modeling/backbones/factory_test.py
official/vision/beta/modeling/backbones/factory_test.py
+34
-0
official/vision/beta/modeling/backbones/mobiledet.py
official/vision/beta/modeling/backbones/mobiledet.py
+579
-0
official/vision/beta/modeling/backbones/mobiledet_test.py
official/vision/beta/modeling/backbones/mobiledet_test.py
+114
-0
official/vision/beta/modeling/backbones/mobilenet.py
official/vision/beta/modeling/backbones/mobilenet.py
+1
-1
official/vision/beta/modeling/decoders/aspp.py
official/vision/beta/modeling/decoders/aspp.py
+30
-11
official/vision/beta/modeling/decoders/aspp_test.py
official/vision/beta/modeling/decoders/aspp_test.py
+12
-9
official/vision/beta/modeling/decoders/nasfpn.py
official/vision/beta/modeling/decoders/nasfpn.py
+7
-7
official/vision/beta/modeling/heads/segmentation_heads.py
official/vision/beta/modeling/heads/segmentation_heads.py
+21
-12
official/vision/beta/modeling/heads/segmentation_heads_test.py
...ial/vision/beta/modeling/heads/segmentation_heads_test.py
+1
-1
official/vision/beta/modeling/layers/deeplab.py
official/vision/beta/modeling/layers/deeplab.py
+0
-1
official/vision/beta/modeling/layers/detection_generator.py
official/vision/beta/modeling/layers/detection_generator.py
+1
-1
No files found.
official/vision/beta/evaluation/coco_utils.py
View file @
c57e975a
...
...
@@ -212,6 +212,8 @@ def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
gt_annotations
=
[]
num_batches
=
len
(
groundtruths
[
'source_id'
])
for
i
in
range
(
num_batches
):
logging
.
info
(
'convert_groundtruths_to_coco_dataset: Processing annotation %d'
,
i
)
max_num_instances
=
groundtruths
[
'classes'
][
i
].
shape
[
1
]
batch_size
=
groundtruths
[
'source_id'
][
i
].
shape
[
0
]
for
j
in
range
(
batch_size
):
...
...
@@ -259,6 +261,10 @@ def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
np_mask
[
np_mask
>
0
]
=
255
encoded_mask
=
mask_api
.
encode
(
np
.
asfortranarray
(
np_mask
))
ann
[
'segmentation'
]
=
encoded_mask
# Ensure the content of `counts` is JSON serializable string.
if
'counts'
in
ann
[
'segmentation'
]:
ann
[
'segmentation'
][
'counts'
]
=
six
.
ensure_str
(
ann
[
'segmentation'
][
'counts'
])
if
'areas'
not
in
groundtruths
:
ann
[
'area'
]
=
mask_api
.
area
(
encoded_mask
)
gt_annotations
.
append
(
ann
)
...
...
@@ -283,11 +289,13 @@ def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
class
COCOGroundtruthGenerator
:
"""Generates the groundtruth annotations from a single example."""
def
__init__
(
self
,
file_pattern
,
file_type
,
num_examples
,
include_mask
):
def
__init__
(
self
,
file_pattern
,
file_type
,
num_examples
,
include_mask
,
regenerate_source_id
=
False
):
self
.
_file_pattern
=
file_pattern
self
.
_num_examples
=
num_examples
self
.
_include_mask
=
include_mask
self
.
_dataset_fn
=
dataset_fn
.
pick_dataset_fn
(
file_type
)
self
.
_regenerate_source_id
=
regenerate_source_id
def
_parse_single_example
(
self
,
example
):
"""Parses a single serialized tf.Example proto.
...
...
@@ -312,16 +320,21 @@ class COCOGroundtruthGenerator:
mask of each instance.
"""
decoder
=
tf_example_decoder
.
TfExampleDecoder
(
include_mask
=
self
.
_include_mask
)
include_mask
=
self
.
_include_mask
,
regenerate_source_id
=
self
.
_regenerate_source_id
)
decoded_tensors
=
decoder
.
decode
(
example
)
image
=
decoded_tensors
[
'image'
]
image_size
=
tf
.
shape
(
image
)[
0
:
2
]
boxes
=
box_ops
.
denormalize_boxes
(
decoded_tensors
[
'groundtruth_boxes'
],
image_size
)
source_id
=
decoded_tensors
[
'source_id'
]
if
source_id
.
dtype
is
tf
.
string
:
source_id
=
tf
.
strings
.
to_number
(
source_id
,
out_type
=
tf
.
int64
)
groundtruths
=
{
'source_id'
:
tf
.
strings
.
to_number
(
decoded_tensors
[
'source_id'
],
out_type
=
tf
.
int64
),
'source_id'
:
source_id
,
'height'
:
decoded_tensors
[
'height'
],
'width'
:
decoded_tensors
[
'width'
],
'num_detections'
:
tf
.
shape
(
decoded_tensors
[
'groundtruth_classes'
])[
0
],
...
...
@@ -341,9 +354,10 @@ class COCOGroundtruthGenerator:
dataset
=
tf
.
data
.
Dataset
.
list_files
(
self
.
_file_pattern
,
shuffle
=
False
)
dataset
=
dataset
.
interleave
(
map_func
=
lambda
filename
:
self
.
_dataset_fn
(
filename
).
prefetch
(
1
),
cycle_length
=
12
,
cycle_length
=
None
,
num_parallel_calls
=
tf
.
data
.
experimental
.
AUTOTUNE
)
dataset
=
dataset
.
take
(
self
.
_num_examples
)
dataset
=
dataset
.
map
(
self
.
_parse_single_example
,
num_parallel_calls
=
tf
.
data
.
experimental
.
AUTOTUNE
)
dataset
=
dataset
.
batch
(
1
,
drop_remainder
=
False
)
...
...
@@ -351,18 +365,18 @@ class COCOGroundtruthGenerator:
return
dataset
def
__call__
(
self
):
for
groundtruth_result
in
self
.
_build_pipeline
():
yield
groundtruth_result
return
self
.
_build_pipeline
()
def
scan_and_generator_annotation_file
(
file_pattern
:
str
,
file_type
:
str
,
num_samples
:
int
,
include_mask
:
bool
,
annotation_file
:
str
):
annotation_file
:
str
,
regenerate_source_id
:
bool
=
False
):
"""Scans and generate the COCO-style annotation JSON file given a dataset."""
groundtruth_generator
=
COCOGroundtruthGenerator
(
file_pattern
,
file_type
,
num_samples
,
include_mask
)
file_pattern
,
file_type
,
num_samples
,
include_mask
,
regenerate_source_id
)
generate_annotation_file
(
groundtruth_generator
,
annotation_file
)
...
...
@@ -371,7 +385,8 @@ def generate_annotation_file(groundtruth_generator,
"""Generates COCO-style annotation JSON file given a groundtruth generator."""
groundtruths
=
{}
logging
.
info
(
'Loading groundtruth annotations from dataset to memory...'
)
for
groundtruth
in
groundtruth_generator
():
for
i
,
groundtruth
in
enumerate
(
groundtruth_generator
()):
logging
.
info
(
'generate_annotation_file: Processing annotation %d'
,
i
)
for
k
,
v
in
six
.
iteritems
(
groundtruth
):
if
k
not
in
groundtruths
:
groundtruths
[
k
]
=
[
v
]
...
...
official/vision/beta/evaluation/coco_utils_test.py
0 → 100644
View file @
c57e975a
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for coco_utils."""
import
os
import
tensorflow
as
tf
from
official.vision.beta.dataloaders
import
tfexample_utils
from
official.vision.beta.evaluation
import
coco_utils
class
CocoUtilsTest
(
tf
.
test
.
TestCase
):
def
test_scan_and_generator_annotation_file
(
self
):
num_samples
=
10
example
=
tfexample_utils
.
create_detection_test_example
(
image_height
=
512
,
image_width
=
512
,
image_channel
=
3
,
num_instances
=
10
)
tf_examples
=
[
example
]
*
num_samples
data_file
=
os
.
path
.
join
(
self
.
create_tempdir
(),
'test.tfrecord'
)
tfexample_utils
.
dump_to_tfrecord
(
record_file
=
data_file
,
tf_examples
=
tf_examples
)
annotation_file
=
os
.
path
.
join
(
self
.
create_tempdir
(),
'annotation.json'
)
coco_utils
.
scan_and_generator_annotation_file
(
file_pattern
=
data_file
,
file_type
=
'tfrecord'
,
num_samples
=
num_samples
,
include_mask
=
True
,
annotation_file
=
annotation_file
)
self
.
assertTrue
(
tf
.
io
.
gfile
.
exists
(
annotation_file
),
msg
=
'Annotation file {annotation_file} does not exists.'
)
if
__name__
==
'__main__'
:
tf
.
test
.
main
()
official/vision/beta/evaluation/iou_test.py
View file @
c57e975a
...
...
@@ -95,5 +95,21 @@ class MeanIoUTest(tf.test.TestCase):
expected_result
=
[
0
,
1
/
(
1
+
1
-
1
)]
self
.
assertAllClose
(
expected_result
,
result
,
atol
=
1e-3
)
def
test_update_state_annd_result
(
self
):
y_pred
=
[
0
,
1
,
0
,
1
]
y_true
=
[
0
,
0
,
1
,
1
]
m_obj
=
iou
.
PerClassIoU
(
num_classes
=
2
)
m_obj
.
update_state
(
y_true
,
y_pred
)
result
=
m_obj
.
result
()
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result
=
[
1
/
(
2
+
2
-
1
),
1
/
(
2
+
2
-
1
)]
self
.
assertAllClose
(
expected_result
,
result
,
atol
=
1e-3
)
if
__name__
==
'__main__'
:
tf
.
test
.
main
()
official/vision/beta/evaluation/panoptic_quality_evaluator_test.py
View file @
c57e975a
...
...
@@ -45,19 +45,25 @@ class PanopticQualityEvaluatorTest(tf.test.TestCase):
dtype
=
np
.
uint16
)
groundtruths
=
{
'category_mask'
:
tf
.
convert_to_tensor
(
category_mask
),
'instance_mask'
:
tf
.
convert_to_tensor
(
groundtruth_instance_mask
)
'category_mask'
:
tf
.
convert_to_tensor
([
category_mask
]),
'instance_mask'
:
tf
.
convert_to_tensor
([
groundtruth_instance_mask
]),
'image_info'
:
tf
.
convert_to_tensor
([[[
6
,
6
],
[
6
,
6
],
[
1.0
,
1.0
],
[
0
,
0
]]],
dtype
=
tf
.
float32
)
}
predictions
=
{
'category_mask'
:
tf
.
convert_to_tensor
(
category_mask
),
'instance_mask'
:
tf
.
convert_to_tensor
(
good_det_instance_mask
)
'category_mask'
:
tf
.
convert_to_tensor
(
[
category_mask
]
),
'instance_mask'
:
tf
.
convert_to_tensor
(
[
good_det_instance_mask
]
)
}
pq_evaluator
=
panoptic_quality_evaluator
.
PanopticQualityEvaluator
(
num_categories
=
1
,
ignored_label
=
2
,
max_instances_per_category
=
16
,
offset
=
16
)
offset
=
16
,
rescale_predictions
=
True
)
for
_
in
range
(
2
):
pq_evaluator
.
update_state
(
groundtruths
,
predictions
)
...
...
@@ -70,7 +76,7 @@ class PanopticQualityEvaluatorTest(tf.test.TestCase):
[
1
,
1
,
1
,
1
,
1
,
1
],
],
dtype
=
np
.
uint16
)
predictions
[
'instance_mask'
]
=
tf
.
convert_to_tensor
(
bad_det_instance_mask
)
predictions
[
'instance_mask'
]
=
tf
.
convert_to_tensor
(
[
bad_det_instance_mask
]
)
for
_
in
range
(
2
):
pq_evaluator
.
update_state
(
groundtruths
,
predictions
)
...
...
official/vision/beta/evaluation/segmentation_metrics.py
View file @
c57e975a
...
...
@@ -41,8 +41,7 @@ class MeanIoU(tf.keras.metrics.MeanIoU):
dtype: data type of the metric result.
"""
self
.
_rescale_predictions
=
rescale_predictions
super
(
MeanIoU
,
self
).
__init__
(
num_classes
=
num_classes
,
name
=
name
,
dtype
=
dtype
)
super
().
__init__
(
num_classes
=
num_classes
,
name
=
name
,
dtype
=
dtype
)
def
update_state
(
self
,
y_true
,
y_pred
):
"""Updates metric state.
...
...
@@ -120,15 +119,14 @@ class MeanIoU(tf.keras.metrics.MeanIoU):
flatten_masks
=
tf
.
reshape
(
masks
,
shape
=
[
-
1
])
flatten_valid_masks
=
tf
.
reshape
(
valid_masks
,
shape
=
[
-
1
])
super
(
MeanIoU
,
self
).
update_state
(
flatten_masks
,
flatten_predictions
,
tf
.
cast
(
flatten_valid_masks
,
tf
.
float32
))
super
().
update_state
(
flatten_masks
,
flatten_predictions
,
tf
.
cast
(
flatten_valid_masks
,
tf
.
float32
))
class
PerClassIoU
(
iou
.
PerClassIoU
):
"""Per Class IoU metric for semantic segmentation.
This class utilizes
keras_cv.metrics
.PerClassIoU to perform batched per class
This class utilizes
iou
.PerClassIoU to perform batched per class
iou when both input images and groundtruth masks are resized to the same size
(rescale_predictions=False). It also computes per class iou on groundtruth
original sizes, in which case, each prediction is rescaled back to the
...
...
@@ -148,8 +146,7 @@ class PerClassIoU(iou.PerClassIoU):
dtype: data type of the metric result.
"""
self
.
_rescale_predictions
=
rescale_predictions
super
(
PerClassIoU
,
self
).
__init__
(
num_classes
=
num_classes
,
name
=
name
,
dtype
=
dtype
)
super
().
__init__
(
num_classes
=
num_classes
,
name
=
name
,
dtype
=
dtype
)
def
update_state
(
self
,
y_true
,
y_pred
):
"""Updates metric state.
...
...
@@ -213,9 +210,8 @@ class PerClassIoU(iou.PerClassIoU):
flatten_predictions
=
tf
.
reshape
(
predicted_mask
,
shape
=
[
1
,
-
1
])
flatten_masks
=
tf
.
reshape
(
mask
,
shape
=
[
1
,
-
1
])
flatten_valid_masks
=
tf
.
reshape
(
valid_mask
,
shape
=
[
1
,
-
1
])
super
(
PerClassIoU
,
self
).
update_state
(
flatten_masks
,
flatten_predictions
,
tf
.
cast
(
flatten_valid_masks
,
tf
.
float32
))
super
().
update_state
(
flatten_masks
,
flatten_predictions
,
tf
.
cast
(
flatten_valid_masks
,
tf
.
float32
))
else
:
predictions
=
tf
.
image
.
resize
(
...
...
@@ -227,6 +223,5 @@ class PerClassIoU(iou.PerClassIoU):
flatten_masks
=
tf
.
reshape
(
masks
,
shape
=
[
-
1
])
flatten_valid_masks
=
tf
.
reshape
(
valid_masks
,
shape
=
[
-
1
])
super
(
PerClassIoU
,
self
).
update_state
(
flatten_masks
,
flatten_predictions
,
tf
.
cast
(
flatten_valid_masks
,
tf
.
float32
))
super
().
update_state
(
flatten_masks
,
flatten_predictions
,
tf
.
cast
(
flatten_valid_masks
,
tf
.
float32
))
official/vision/beta/evaluation/segmentation_metrics_test.py
0 → 100644
View file @
c57e975a
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_metrics."""
from
absl.testing
import
parameterized
import
numpy
as
np
import
tensorflow
as
tf
from
official.vision.beta.evaluation
import
segmentation_metrics
class
SegmentationMetricsTest
(
parameterized
.
TestCase
,
tf
.
test
.
TestCase
):
def
_create_test_data
(
self
):
y_pred_cls0
=
np
.
expand_dims
(
np
.
array
([[
1
,
1
,
0
],
[
1
,
1
,
0
],
[
0
,
0
,
0
]],
dtype
=
np
.
uint16
),
axis
=
(
0
,
-
1
))
y_pred_cls1
=
np
.
expand_dims
(
np
.
array
([[
0
,
0
,
0
],
[
0
,
0
,
1
],
[
0
,
0
,
1
]],
dtype
=
np
.
uint16
),
axis
=
(
0
,
-
1
))
y_pred
=
np
.
concatenate
((
y_pred_cls0
,
y_pred_cls1
),
axis
=-
1
)
y_true
=
{
'masks'
:
np
.
expand_dims
(
np
.
array
([[
0
,
0
,
0
,
0
,
0
,
0
],
[
0
,
0
,
0
,
0
,
0
,
0
],
[
0
,
0
,
0
,
0
,
0
,
0
],
[
0
,
0
,
0
,
1
,
1
,
1
],
[
0
,
0
,
0
,
1
,
1
,
1
],
[
0
,
0
,
0
,
1
,
1
,
1
]],
dtype
=
np
.
uint16
),
axis
=
(
0
,
-
1
)),
'valid_masks'
:
np
.
ones
([
1
,
6
,
6
,
1
],
dtype
=
np
.
uint16
),
'image_info'
:
np
.
array
([[[
6
,
6
],
[
3
,
3
],
[
0.5
,
0.5
],
[
0
,
0
]]],
dtype
=
np
.
float32
)
}
return
y_pred
,
y_true
@
parameterized
.
parameters
(
True
,
False
)
def
test_mean_iou_metric
(
self
,
rescale_predictions
):
tf
.
config
.
experimental_run_functions_eagerly
(
True
)
mean_iou_metric
=
segmentation_metrics
.
MeanIoU
(
num_classes
=
2
,
rescale_predictions
=
rescale_predictions
)
y_pred
,
y_true
=
self
.
_create_test_data
()
# Disable autograph for correct coverage statistics.
update_fn
=
tf
.
autograph
.
experimental
.
do_not_convert
(
mean_iou_metric
.
update_state
)
update_fn
(
y_true
=
y_true
,
y_pred
=
y_pred
)
miou
=
mean_iou_metric
.
result
()
self
.
assertAlmostEqual
(
miou
.
numpy
(),
0.762
,
places
=
3
)
@
parameterized
.
parameters
(
True
,
False
)
def
test_per_class_mean_iou_metric
(
self
,
rescale_predictions
):
per_class_iou_metric
=
segmentation_metrics
.
PerClassIoU
(
num_classes
=
2
,
rescale_predictions
=
rescale_predictions
)
y_pred
,
y_true
=
self
.
_create_test_data
()
# Disable autograph for correct coverage statistics.
update_fn
=
tf
.
autograph
.
experimental
.
do_not_convert
(
per_class_iou_metric
.
update_state
)
update_fn
(
y_true
=
y_true
,
y_pred
=
y_pred
)
per_class_miou
=
per_class_iou_metric
.
result
()
self
.
assertAllClose
(
per_class_miou
.
numpy
(),
[
0.857
,
0.667
],
atol
=
1e-3
)
if
__name__
==
'__main__'
:
tf
.
test
.
main
()
official/vision/
keras_cv
/losses/focal_loss.py
→
official/vision/
beta
/losses/focal_loss.py
View file @
c57e975a
File moved
official/vision/
keras_cv
/losses/loss_utils.py
→
official/vision/
beta
/losses/loss_utils.py
View file @
c57e975a
File moved
official/vision/beta/modeling/backbones/__init__.py
View file @
c57e975a
...
...
@@ -16,6 +16,7 @@
"""Backbones package definition."""
from
official.vision.beta.modeling.backbones.efficientnet
import
EfficientNet
from
official.vision.beta.modeling.backbones.mobiledet
import
MobileDet
from
official.vision.beta.modeling.backbones.mobilenet
import
MobileNet
from
official.vision.beta.modeling.backbones.resnet
import
ResNet
from
official.vision.beta.modeling.backbones.resnet_3d
import
ResNet3D
...
...
official/vision/beta/modeling/backbones/factory_test.py
View file @
c57e975a
...
...
@@ -189,6 +189,40 @@ class FactoryTest(tf.test.TestCase, parameterized.TestCase):
norm_momentum
=
0.99
,
norm_epsilon
=
1e-5
)
@
combinations
.
generate
(
combinations
.
combine
(
model_id
=
[
'MobileDetCPU'
,
'MobileDetDSP'
,
'MobileDetEdgeTPU'
,
'MobileDetGPU'
],
filter_size_scale
=
[
1.0
,
0.75
],
))
def
test_mobiledet_creation
(
self
,
model_id
,
filter_size_scale
):
"""Test creation of Mobiledet models."""
network
=
backbones
.
MobileDet
(
model_id
=
model_id
,
filter_size_scale
=
filter_size_scale
,
norm_momentum
=
0.99
,
norm_epsilon
=
1e-5
)
backbone_config
=
backbones_cfg
.
Backbone
(
type
=
'mobiledet'
,
mobiledet
=
backbones_cfg
.
MobileDet
(
model_id
=
model_id
,
filter_size_scale
=
filter_size_scale
))
norm_activation_config
=
common_cfg
.
NormActivation
(
norm_momentum
=
0.99
,
norm_epsilon
=
1e-5
,
use_sync_bn
=
False
)
factory_network
=
factory
.
build_backbone
(
input_specs
=
tf
.
keras
.
layers
.
InputSpec
(
shape
=
[
None
,
None
,
None
,
3
]),
backbone_config
=
backbone_config
,
norm_activation_config
=
norm_activation_config
)
network_config
=
network
.
get_config
()
factory_network_config
=
factory_network
.
get_config
()
self
.
assertEqual
(
network_config
,
factory_network_config
)
if
__name__
==
'__main__'
:
tf
.
test
.
main
()
official/vision/beta/modeling/backbones/mobiledet.py
0 → 100644
View file @
c57e975a
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of MobileDet Networks."""
import
dataclasses
from
typing
import
Any
,
Dict
,
Optional
,
Tuple
,
List
import
tensorflow
as
tf
from
official.modeling
import
hyperparams
from
official.vision.beta.modeling.backbones
import
factory
from
official.vision.beta.modeling.backbones
import
mobilenet
from
official.vision.beta.modeling.layers
import
nn_blocks
from
official.vision.beta.modeling.layers
import
nn_layers
layers
=
tf
.
keras
.
layers
# pylint: disable=pointless-string-statement
"""
Architecture: https://arxiv.org/abs/1704.04861.
"MobileDets: Searching for Object Detection Architectures for
Mobile Accelerators" Yunyang Xiong, Hanxiao Liu, Suyog Gupta, Berkin Akin,
Gabriel Bender, Yongzhe Wang, Pieter-Jan Kindermans, Mingxing Tan, Vikas Singh,
Bo Chen
Note that `round_down_protection` flag should be set to false for scaling
of the network.
"""
MD_CPU_BLOCK_SPECS
=
{
'spec_name'
:
'MobileDetCPU'
,
# [expand_ratio] is set to 1 and [use_residual] is set to false
# for inverted_bottleneck_no_expansion
# [se_ratio] is set to 0.25 for all inverted_bottleneck layers
# [activation] is set to 'hard_swish' for all applicable layers
'block_spec_schema'
:
[
'block_fn'
,
'kernel_size'
,
'strides'
,
'filters'
,
'activation'
,
'se_ratio'
,
'expand_ratio'
,
'use_residual'
,
'is_output'
],
'block_specs'
:
[
(
'convbn'
,
3
,
2
,
16
,
'hard_swish'
,
None
,
None
,
None
,
False
),
# inverted_bottleneck_no_expansion
(
'invertedbottleneck'
,
3
,
1
,
8
,
'hard_swish'
,
0.25
,
1.
,
False
,
True
),
(
'invertedbottleneck'
,
3
,
2
,
16
,
'hard_swish'
,
0.25
,
4.
,
False
,
True
),
(
'invertedbottleneck'
,
3
,
2
,
32
,
'hard_swish'
,
0.25
,
8.
,
False
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
32
,
'hard_swish'
,
0.25
,
4.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
32
,
'hard_swish'
,
0.25
,
4.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
32
,
'hard_swish'
,
0.25
,
4.
,
True
,
True
),
(
'invertedbottleneck'
,
5
,
2
,
72
,
'hard_swish'
,
0.25
,
8.
,
False
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'hard_swish'
,
0.25
,
8.
,
True
,
False
),
(
'invertedbottleneck'
,
5
,
1
,
72
,
'hard_swish'
,
0.25
,
4.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'hard_swish'
,
0.25
,
4.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'hard_swish'
,
0.25
,
8.
,
False
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'hard_swish'
,
0.25
,
8.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'hard_swish'
,
0.25
,
8.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'hard_swish'
,
0.25
,
8.
,
True
,
True
),
(
'invertedbottleneck'
,
5
,
2
,
104
,
'hard_swish'
,
0.25
,
8.
,
False
,
False
),
(
'invertedbottleneck'
,
5
,
1
,
104
,
'hard_swish'
,
0.25
,
4.
,
True
,
False
),
(
'invertedbottleneck'
,
5
,
1
,
104
,
'hard_swish'
,
0.25
,
4.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
104
,
'hard_swish'
,
0.25
,
4.
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
144
,
'hard_swish'
,
0.25
,
8.
,
False
,
True
),
]
}
MD_DSP_BLOCK_SPECS
=
{
'spec_name'
:
'MobileDetDSP'
,
# [expand_ratio] is set to 1 and [use_residual] is set to false
# for inverted_bottleneck_no_expansion
# [use_depthwise] is set to False for fused_conv
# [se_ratio] is set to None for all inverted_bottleneck layers
# [activation] is set to 'relu6' for all applicable layers
'block_spec_schema'
:
[
'block_fn'
,
'kernel_size'
,
'strides'
,
'filters'
,
'activation'
,
'se_ratio'
,
'expand_ratio'
,
'input_compression_ratio'
,
'output_compression_ratio'
,
'use_depthwise'
,
'use_residual'
,
'is_output'
],
'block_specs'
:
[
(
'convbn'
,
3
,
2
,
32
,
'relu6'
,
None
,
None
,
None
,
None
,
None
,
None
,
False
),
# inverted_bottleneck_no_expansion
(
'invertedbottleneck'
,
3
,
1
,
24
,
'relu6'
,
None
,
1.
,
None
,
None
,
True
,
False
,
True
),
(
'invertedbottleneck'
,
3
,
2
,
32
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
32
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
32
,
'relu6'
,
None
,
4.
,
None
,
None
,
True
,
True
,
False
),
(
'tucker'
,
3
,
1
,
32
,
'relu6'
,
None
,
None
,
0.25
,
0.75
,
None
,
True
,
True
),
(
'invertedbottleneck'
,
3
,
2
,
64
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
64
,
'relu6'
,
None
,
4.
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
64
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
64
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
True
),
# fused_conv
(
'invertedbottleneck'
,
3
,
2
,
120
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
120
,
'relu6'
,
None
,
4.
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
120
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
120
,
'relu6'
,
None
,
8.
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
144
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
144
,
'relu6'
,
None
,
8.
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
144
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
144
,
'relu6'
,
None
,
8.
,
None
,
None
,
True
,
True
,
True
),
(
'invertedbottleneck'
,
3
,
2
,
160
,
'relu6'
,
None
,
4
,
None
,
None
,
True
,
False
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
160
,
'relu6'
,
None
,
4
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
160
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'tucker'
,
3
,
1
,
160
,
'relu6'
,
None
,
None
,
0.75
,
0.75
,
None
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
240
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
False
,
True
),
]
}
MD_EdgeTPU_BLOCK_SPECS
=
{
'spec_name'
:
'MobileDetEdgeTPU'
,
# [use_depthwise] is set to False for fused_conv
# [se_ratio] is set to None for all inverted_bottleneck layers
# [activation] is set to 'relu6' for all applicable layers
'block_spec_schema'
:
[
'block_fn'
,
'kernel_size'
,
'strides'
,
'filters'
,
'activation'
,
'se_ratio'
,
'expand_ratio'
,
'input_compression_ratio'
,
'output_compression_ratio'
,
'use_depthwise'
,
'use_residual'
,
'is_output'
],
'block_specs'
:
[
(
'convbn'
,
3
,
2
,
32
,
'relu6'
,
None
,
None
,
None
,
None
,
None
,
None
,
False
),
(
'tucker'
,
3
,
1
,
16
,
'relu6'
,
None
,
None
,
0.25
,
0.75
,
None
,
False
,
True
),
(
'invertedbottleneck'
,
3
,
2
,
16
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
16
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
16
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
16
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
True
),
# fused_conv
(
'invertedbottleneck'
,
5
,
2
,
40
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
40
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
40
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
40
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
True
),
# fused_conv
(
'invertedbottleneck'
,
3
,
2
,
72
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
False
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
72
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
72
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
5
,
1
,
96
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
False
,
False
),
(
'invertedbottleneck'
,
5
,
1
,
96
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
96
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
96
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
True
),
(
'invertedbottleneck'
,
5
,
2
,
120
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
False
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
120
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
5
,
1
,
120
,
'relu6'
,
None
,
4
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
3
,
1
,
120
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
True
,
False
),
(
'invertedbottleneck'
,
5
,
1
,
384
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
False
,
True
),
]
}
MD_GPU_BLOCK_SPECS
=
{
'spec_name'
:
'MobileDetGPU'
,
# [use_depthwise] is set to False for fused_conv
# [se_ratio] is set to None for all inverted_bottleneck layers
# [activation] is set to 'relu6' for all applicable layers
'block_spec_schema'
:
[
'block_fn'
,
'kernel_size'
,
'strides'
,
'filters'
,
'activation'
,
'se_ratio'
,
'expand_ratio'
,
'input_compression_ratio'
,
'output_compression_ratio'
,
'use_depthwise'
,
'use_residual'
,
'is_output'
],
'block_specs'
:
[
# block 0
(
'convbn'
,
3
,
2
,
32
,
'relu6'
,
None
,
None
,
None
,
None
,
None
,
None
,
False
),
# block 1
(
'tucker'
,
3
,
1
,
16
,
'relu6'
,
None
,
None
,
0.25
,
0.25
,
None
,
False
,
True
),
# block 2
(
'invertedbottleneck'
,
3
,
2
,
32
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'tucker'
,
3
,
1
,
32
,
'relu6'
,
None
,
None
,
0.25
,
0.25
,
None
,
True
,
False
),
(
'tucker'
,
3
,
1
,
32
,
'relu6'
,
None
,
None
,
0.25
,
0.25
,
None
,
True
,
False
),
(
'tucker'
,
3
,
1
,
32
,
'relu6'
,
None
,
None
,
0.25
,
0.25
,
None
,
True
,
True
),
# block 3
(
'invertedbottleneck'
,
3
,
2
,
64
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
64
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
64
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
64
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
True
),
# fused_conv
# block 4
(
'invertedbottleneck'
,
3
,
2
,
128
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
# block 5
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
8.
,
None
,
None
,
False
,
True
,
True
),
# fused_conv
# block 6
(
'invertedbottleneck'
,
3
,
2
,
128
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
False
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
(
'invertedbottleneck'
,
3
,
1
,
128
,
'relu6'
,
None
,
4.
,
None
,
None
,
False
,
True
,
False
),
# fused_conv
# block 7
(
'invertedbottleneck'
,
3
,
1
,
384
,
'relu6'
,
None
,
8
,
None
,
None
,
True
,
False
,
True
),
]
}
SUPPORTED_SPECS_MAP
=
{
'MobileDetCPU'
:
MD_CPU_BLOCK_SPECS
,
'MobileDetDSP'
:
MD_DSP_BLOCK_SPECS
,
'MobileDetEdgeTPU'
:
MD_EdgeTPU_BLOCK_SPECS
,
'MobileDetGPU'
:
MD_GPU_BLOCK_SPECS
,
}
@dataclasses.dataclass
class BlockSpec(hyperparams.Config):
  """A container class that specifies the block configuration for MobileDet.

  One `BlockSpec` describes a single layer/block of the backbone; a decoded
  list of them (see `block_spec_decoder`) defines the full architecture.
  """
  # Block type; the visible decoder handles 'convbn', 'invertedbottleneck'
  # and 'tucker'.
  block_fn: str = 'convbn'
  # Convolution kernel size.
  kernel_size: int = 3
  # Convolution stride.
  strides: int = 1
  # Number of output filters (channels) of the block.
  filters: int = 32
  # Whether the conv layer adds a bias term.
  use_bias: bool = False
  # Whether the conv layer is followed by a normalization layer.
  use_normalization: bool = True
  # Activation function name.
  activation: str = 'relu6'
  # If True, the block's output is collected as a backbone endpoint.
  is_output: bool = True
  # Used for block type InvertedResConv and TuckerConvBlock.
  use_residual: bool = True
  # Used for block type InvertedResConv only.
  use_depthwise: bool = True
  # Channel expansion ratio of the inverted bottleneck.
  expand_ratio: Optional[float] = 8.
  # Squeeze-and-excite ratio; None disables SE.
  se_ratio: Optional[float] = None
  # Used for block type TuckerConvBlock only.
  input_compression_ratio: Optional[float] = None
  output_compression_ratio: Optional[float] = None
def block_spec_decoder(specs: Dict[Any, Any],
                       filter_size_scale: float,
                       divisible_by: int = 8) -> List[BlockSpec]:
  """Decodes a raw spec dict into a list of `BlockSpec` objects.

  Args:
    specs: A `dict` specification of block specs of a mobiledet version,
      with keys 'spec_name', 'block_spec_schema' and 'block_specs'.
    filter_size_scale: A `float` multiplier for the filter size for all
      convolution ops. The value must be greater than zero. Typical usage will
      be to set this value in (0, 1) to reduce the number of parameters or
      computation cost of the model.
    divisible_by: An `int` that ensures all inner dimensions are divisible by
      this number.

  Returns:
    A list of `BlockSpec` that defines structure of the base network.

  Raises:
    ValueError: If the spec list is empty or a row does not match the schema.
  """
  spec_name = specs['spec_name']
  schema = specs['block_spec_schema']
  raw_rows = specs['block_specs']

  # Validate before decoding so errors point at the offending variant.
  if not raw_rows:
    raise ValueError(f'The block spec cannot be empty for {spec_name} !')
  if len(raw_rows[0]) != len(schema):
    raise ValueError(f'The block spec values {raw_rows[0]} do not match with '
                     f'the schema {schema}')

  # Each row is positional; zip it against the schema to get keyword args.
  decoded_specs = [BlockSpec(**dict(zip(schema, row))) for row in raw_rows]

  # Apply the width multiplier to every block that declares a filter count.
  for block in decoded_specs:
    if block.filters:
      block.filters = nn_layers.round_filters(
          filters=block.filters,
          multiplier=filter_size_scale,
          divisor=divisible_by,
          round_down_protect=False,
          min_depth=8)

  return decoded_specs
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileDet(tf.keras.Model):
  """Creates a MobileDet family model.

  Builds a Keras functional model whose outputs are a dict of endpoint
  feature maps keyed by endpoint level (as `str`), suitable for use as a
  detection backbone.
  """

  def __init__(self,
               model_id: str = 'MobileDetCPU',
               filter_size_scale: float = 1.0,
               input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
                   shape=[None, None, None, 3]),
               # The followings are for hyper-parameter tuning.
               norm_momentum: float = 0.99,
               norm_epsilon: float = 0.001,
               kernel_initializer: str = 'VarianceScaling',
               kernel_regularizer: Optional[
                   tf.keras.regularizers.Regularizer] = None,
               bias_regularizer: Optional[
                   tf.keras.regularizers.Regularizer] = None,
               # The followings should be kept the same most of the times.
               min_depth: int = 8,
               divisible_by: int = 8,
               regularize_depthwise: bool = False,
               use_sync_bn: bool = False,
               **kwargs):
    """Initializes a MobileDet model.

    Args:
      model_id: A `str` of MobileDet version. The supported values are
        `MobileDetCPU`, `MobileDetDSP`, `MobileDetEdgeTPU`, `MobileDetGPU`.
      filter_size_scale: A `float` of multiplier for the filters (number of
        channels) for all convolution ops. The value must be greater than zero.
        Typical usage will be to set this value in (0, 1) to reduce the number
        of parameters or computation cost of the model.
      input_specs: A `tf.keras.layers.InputSpec` of specs of the input tensor.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      kernel_initializer: A `str` for kernel initializer of convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
        Default to None.
      min_depth: An `int` of minimum depth (number of channels) for all
        convolution ops. Enforced when filter_size_scale < 1, and not an active
        constraint when filter_size_scale >= 1.
      divisible_by: An `int` that ensures all inner dimensions are divisible by
        this number.
      regularize_depthwise: If True, apply regularization on depthwise.
      use_sync_bn: If True, use synchronized batch normalization.
      **kwargs: Additional keyword arguments to be passed.

    Raises:
      ValueError: If `model_id` is unsupported or `filter_size_scale` <= 0.
    """
    if model_id not in SUPPORTED_SPECS_MAP:
      raise ValueError('The MobileDet version {} '
                       'is not supported'.format(model_id))

    if filter_size_scale <= 0:
      raise ValueError('filter_size_scale is not greater than zero.')

    # Stash configuration so `get_config` can round-trip the model.
    self._model_id = model_id
    self._input_specs = input_specs
    self._filter_size_scale = filter_size_scale
    self._min_depth = min_depth
    self._divisible_by = divisible_by
    self._regularize_depthwise = regularize_depthwise
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._use_sync_bn = use_sync_bn
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon

    # Functional-API input; batch dimension is dropped from the spec shape.
    inputs = tf.keras.Input(shape=input_specs.shape[1:])

    # Decode the variant's raw block specs, applying the width multiplier.
    block_specs = SUPPORTED_SPECS_MAP.get(model_id)
    self._decoded_specs = block_spec_decoder(
        specs=block_specs,
        filter_size_scale=self._filter_size_scale,
        divisible_by=self._get_divisible_by())

    # Build the layer graph; only `endpoints` is used as the model output.
    x, endpoints, next_endpoint_level = self._mobiledet_base(inputs=inputs)

    self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}

    super(MobileDet, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)

  def _get_divisible_by(self):
    # Hook point so subclasses could override the channel-rounding divisor.
    return self._divisible_by

  def _mobiledet_base(self,
                      inputs: tf.Tensor
                      ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor], int]:
    """Builds the base MobileDet architecture.

    Args:
      inputs: A `tf.Tensor` of shape `[batch_size, height, width, channels]`.

    Returns:
      A tuple of (final output tensor, dict of collected endpoint tensors
      keyed by endpoint level as `str`, next unused endpoint level).

    Raises:
      ValueError: If the input is not rank 4 or a block type is unknown.
    """
    input_shape = inputs.get_shape().as_list()
    if len(input_shape) != 4:
      raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))

    net = inputs
    endpoints = {}
    endpoint_level = 1
    for i, block_def in enumerate(self._decoded_specs):
      block_name = 'block_group_{}_{}'.format(block_def.block_fn, i)

      # Dispatch on the block type declared in the decoded spec.
      if block_def.block_fn == 'convbn':
        net = mobilenet.Conv2DBNBlock(
            filters=block_def.filters,
            kernel_size=block_def.kernel_size,
            strides=block_def.strides,
            activation=block_def.activation,
            use_bias=block_def.use_bias,
            use_normalization=block_def.use_normalization,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            use_sync_bn=self._use_sync_bn,
            norm_momentum=self._norm_momentum,
            norm_epsilon=self._norm_epsilon
        )(net)

      elif block_def.block_fn == 'invertedbottleneck':
        # Input channel count is taken from the running tensor shape.
        in_filters = net.shape.as_list()[-1]
        net = nn_blocks.InvertedBottleneckBlock(
            in_filters=in_filters,
            out_filters=block_def.filters,
            kernel_size=block_def.kernel_size,
            strides=block_def.strides,
            expand_ratio=block_def.expand_ratio,
            se_ratio=block_def.se_ratio,
            se_inner_activation=block_def.activation,
            se_gating_activation='sigmoid',
            se_round_down_protect=False,
            expand_se_in_filters=True,
            activation=block_def.activation,
            use_depthwise=block_def.use_depthwise,
            use_residual=block_def.use_residual,
            regularize_depthwise=self._regularize_depthwise,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            use_sync_bn=self._use_sync_bn,
            norm_momentum=self._norm_momentum,
            norm_epsilon=self._norm_epsilon,
            divisible_by=self._get_divisible_by()
        )(net)

      elif block_def.block_fn == 'tucker':
        in_filters = net.shape.as_list()[-1]
        net = nn_blocks.TuckerConvBlock(
            in_filters=in_filters,
            out_filters=block_def.filters,
            kernel_size=block_def.kernel_size,
            strides=block_def.strides,
            input_compression_ratio=block_def.input_compression_ratio,
            output_compression_ratio=block_def.output_compression_ratio,
            activation=block_def.activation,
            use_residual=block_def.use_residual,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            use_sync_bn=self._use_sync_bn,
            norm_momentum=self._norm_momentum,
            norm_epsilon=self._norm_epsilon,
            divisible_by=self._get_divisible_by()
        )(net)

      else:
        raise ValueError('Unknown block type {} for layer {}'.format(
            block_def.block_fn, i))

      # Identity activation only to attach a stable, readable layer name.
      net = tf.keras.layers.Activation('linear', name=block_name)(net)

      # Blocks flagged `is_output` contribute a backbone endpoint.
      if block_def.is_output:
        endpoints[str(endpoint_level)] = net
        endpoint_level += 1

    return net, endpoints, endpoint_level

  def get_config(self):
    # Everything needed to reconstruct the model via `from_config`.
    config_dict = {
        'model_id': self._model_id,
        'filter_size_scale': self._filter_size_scale,
        'min_depth': self._min_depth,
        'divisible_by': self._divisible_by,
        'regularize_depthwise': self._regularize_depthwise,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
    }
    return config_dict

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Alternate constructor used by Keras (de)serialization.
    return cls(**config)

  @property
  def output_specs(self):
    """A dict of {level: TensorShape} pairs for the model output."""
    return self._output_specs
@factory.register_backbone_builder('mobiledet')
def build_mobiledet(
    input_specs: tf.keras.layers.InputSpec,
    backbone_config: hyperparams.Config,
    norm_activation_config: hyperparams.Config,
    l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
  """Builds a MobileDet backbone from a config.

  Args:
    input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
    backbone_config: A backbone config whose `type` must be 'mobiledet'.
    norm_activation_config: Normalization/activation hyperparameters.
    l2_regularizer: Optional kernel regularizer applied to Conv2D layers.

  Returns:
    A `tf.keras.Model` instance of the MobileDet backbone.
  """
  cfg_type = backbone_config.type
  assert cfg_type == 'mobiledet', (f'Inconsistent backbone type '
                                   f'{cfg_type}')
  cfg = backbone_config.get()

  return MobileDet(
      model_id=cfg.model_id,
      filter_size_scale=cfg.filter_size_scale,
      input_specs=input_specs,
      use_sync_bn=norm_activation_config.use_sync_bn,
      norm_momentum=norm_activation_config.norm_momentum,
      norm_epsilon=norm_activation_config.norm_epsilon,
      kernel_regularizer=l2_regularizer)
official/vision/beta/modeling/backbones/mobiledet_test.py
0 → 100644
View file @
c57e975a
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mobiledet."""
import
itertools
from
absl.testing
import
parameterized
import
tensorflow
as
tf
from
official.vision.beta.modeling.backbones
import
mobiledet
class MobileDetTest(parameterized.TestCase, tf.test.TestCase):
  """Unit tests for the MobileDet backbone family."""

  @parameterized.parameters(
      'MobileDetCPU',
      'MobileDetDSP',
      'MobileDetEdgeTPU',
      'MobileDetGPU',
  )
  def test_serialize_deserialize(self, model_id):
    """Checks that get_config/from_config round-trips every variant."""
    # Create a network object that sets all of its config options.
    kwargs = dict(
        model_id=model_id,
        filter_size_scale=1.0,
        use_sync_bn=False,
        kernel_initializer='VarianceScaling',
        kernel_regularizer=None,
        bias_regularizer=None,
        norm_momentum=0.99,
        norm_epsilon=0.001,
        min_depth=8,
        divisible_by=8,
        regularize_depthwise=False,
    )
    network = mobiledet.MobileDet(**kwargs)

    # get_config must echo back exactly the constructor arguments.
    expected_config = dict(kwargs)
    self.assertEqual(network.get_config(), expected_config)

    # Create another network object from the first object's config.
    new_network = mobiledet.MobileDet.from_config(network.get_config())

    # Validate that the config can be forced to JSON.
    _ = new_network.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(network.get_config(), new_network.get_config())

  @parameterized.parameters(itertools.product(
      [1, 3],
      [
          'MobileDetCPU',
          'MobileDetDSP',
          'MobileDetEdgeTPU',
          'MobileDetGPU',
      ],
  ))
  def test_input_specs(self, input_dim, model_id):
    """Test different input feature dimensions."""
    tf.keras.backend.set_image_data_format('channels_last')

    # Non-default channel count (e.g. 1 for grayscale) must still build.
    input_specs = tf.keras.layers.InputSpec(
        shape=[None, None, None, input_dim])
    network = mobiledet.MobileDet(model_id=model_id, input_specs=input_specs)

    inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)
    _ = network(inputs)

  @parameterized.parameters(itertools.product(
      [
          'MobileDetCPU',
          'MobileDetDSP',
          'MobileDetEdgeTPU',
          'MobileDetGPU',
      ],
      [32, 224],
  ))
  def test_mobiledet_creation(self, model_id, input_size):
    """Test creation of MobileDet family models."""
    tf.keras.backend.set_image_data_format('channels_last')

    mobiledet_layers = {
        # The number of filters of layers having outputs been collected
        # for filter_size_scale = 1.0
        'MobileDetCPU': [8, 16, 32, 72, 144],
        'MobileDetDSP': [24, 32, 64, 144, 240],
        'MobileDetEdgeTPU': [16, 16, 40, 96, 384],
        'MobileDetGPU': [16, 32, 64, 128, 384],
    }

    network = mobiledet.MobileDet(model_id=model_id, filter_size_scale=1.0)

    inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
    endpoints = network(inputs)

    # Each endpoint level halves the spatial size; check shape and channels.
    for idx, num_filter in enumerate(mobiledet_layers[model_id]):
      self.assertAllEqual(
          [1, input_size / 2 ** (idx + 1), input_size / 2 ** (idx + 1),
           num_filter],
          endpoints[str(idx + 1)].shape.as_list())
official/vision/beta/modeling/backbones/mobilenet.py
View file @
c57e975a
...
...
@@ -574,7 +574,7 @@ class MobileNet(tf.keras.Model):
Args:
model_id: A `str` of MobileNet version. The supported values are
`MobileNetV1`, `MobileNetV2`, `MobileNetV3Large`, `MobileNetV3Small`,
and
`MobileNetV3EdgeTPU`.
`MobileNetV3EdgeTPU`
, `MobileNetMultiMAX` and `MobileNetMultiAVG`
.
filter_size_scale: A `float` of multiplier for the filters (number of
channels) for all convolution ops. The value must be greater than zero.
Typical usage will be to set this value in (0, 1) to reduce the number
...
...
official/vision/beta/modeling/decoders/aspp.py
View file @
c57e975a
...
...
@@ -13,7 +13,7 @@
# limitations under the License.
"""Contains definitions of Atrous Spatial Pyramid Pooling (ASPP) decoder."""
from
typing
import
Any
,
List
,
Mapping
,
Optional
from
typing
import
Any
,
List
,
Mapping
,
Optional
,
Union
# Import libraries
...
...
@@ -22,6 +22,9 @@ import tensorflow as tf
from
official.modeling
import
hyperparams
from
official.vision.beta.modeling.decoders
import
factory
from
official.vision.beta.modeling.layers
import
deeplab
from
official.vision.beta.modeling.layers
import
nn_layers
TensorMapUnion
=
Union
[
tf
.
Tensor
,
Mapping
[
str
,
tf
.
Tensor
]]
@
tf
.
keras
.
utils
.
register_keras_serializable
(
package
=
'Vision'
)
...
...
@@ -43,6 +46,8 @@ class ASPP(tf.keras.layers.Layer):
kernel_regularizer
:
Optional
[
tf
.
keras
.
regularizers
.
Regularizer
]
=
None
,
interpolation
:
str
=
'bilinear'
,
use_depthwise_convolution
:
bool
=
False
,
spp_layer_version
:
str
=
'v1'
,
output_tensor
:
bool
=
False
,
**
kwargs
):
"""Initializes an Atrous Spatial Pyramid Pooling (ASPP) layer.
...
...
@@ -67,9 +72,12 @@ class ASPP(tf.keras.layers.Layer):
`gaussian`, or `mitchellcubic`.
use_depthwise_convolution: If True depthwise separable convolutions will
be added to the Atrous spatial pyramid pooling.
spp_layer_version: A `str` of spatial pyramid pooling layer version.
output_tensor: Whether to output a single tensor or a dictionary of tensor.
Default is false.
**kwargs: Additional keyword arguments to be passed.
"""
super
(
ASPP
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
self
.
_config_dict
=
{
'level'
:
level
,
'dilation_rates'
:
dilation_rates
,
...
...
@@ -84,7 +92,11 @@ class ASPP(tf.keras.layers.Layer):
'kernel_regularizer'
:
kernel_regularizer
,
'interpolation'
:
interpolation
,
'use_depthwise_convolution'
:
use_depthwise_convolution
,
'spp_layer_version'
:
spp_layer_version
,
'output_tensor'
:
output_tensor
}
self
.
_aspp_layer
=
deeplab
.
SpatialPyramidPooling
if
self
.
_config_dict
[
'spp_layer_version'
]
==
'v1'
else
nn_layers
.
SpatialPyramidPooling
def
build
(
self
,
input_shape
):
pool_kernel_size
=
None
...
...
@@ -93,7 +105,8 @@ class ASPP(tf.keras.layers.Layer):
int
(
p_size
//
2
**
self
.
_config_dict
[
'level'
])
for
p_size
in
self
.
_config_dict
[
'pool_kernel_size'
]
]
self
.
aspp
=
deeplab
.
SpatialPyramidPooling
(
self
.
aspp
=
self
.
_aspp_layer
(
output_channels
=
self
.
_config_dict
[
'num_filters'
],
dilation_rates
=
self
.
_config_dict
[
'dilation_rates'
],
pool_kernel_size
=
pool_kernel_size
,
...
...
@@ -108,28 +121,32 @@ class ASPP(tf.keras.layers.Layer):
use_depthwise_convolution
=
self
.
_config_dict
[
'use_depthwise_convolution'
]
)
def
call
(
self
,
inputs
:
Mapping
[
str
,
tf
.
Tensor
])
->
Mapping
[
str
,
tf
.
Tensor
]
:
def
call
(
self
,
inputs
:
TensorMapUnion
)
->
TensorMapUnion
:
"""Calls the Atrous Spatial Pyramid Pooling (ASPP) layer on an input.
The output of ASPP will be a dict of {`level`, `tf.Tensor`} even if only one
level is present. Hence, this will be compatible with the rest of the
segmentation model interfaces.
level is present, if output_tensor is false. Hence, this will be compatible
with the rest of the segmentation model interfaces.
If output_tensor is true, a single tensot is output.
Args:
inputs: A `dict` of `tf.Tensor` where
inputs: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or
a `dict` of `tf.Tensor` where
- key: A `str` of the level of the multilevel feature maps.
- values: A `tf.Tensor` of shape [batch, height_l, width_l,
filter_size].
Returns:
A `dict` of `tf.Tensor` where
A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or a `dict`
of `tf.Tensor` where
- key: A `str` of the level of the multilevel feature maps.
- values: A `tf.Tensor` of output of ASPP module.
"""
outputs
=
{}
level
=
str
(
self
.
_config_dict
[
'level'
])
outputs
[
level
]
=
self
.
aspp
(
inputs
[
level
])
return
outputs
backbone_output
=
inputs
[
level
]
if
isinstance
(
inputs
,
dict
)
else
inputs
outputs
=
self
.
aspp
(
backbone_output
)
return
outputs
if
self
.
_config_dict
[
'output_tensor'
]
else
{
level
:
outputs
}
def
get_config
(
self
)
->
Mapping
[
str
,
Any
]:
return
self
.
_config_dict
...
...
@@ -180,4 +197,6 @@ def build_aspp_decoder(
norm_momentum
=
norm_activation_config
.
norm_momentum
,
norm_epsilon
=
norm_activation_config
.
norm_epsilon
,
activation
=
norm_activation_config
.
activation
,
kernel_regularizer
=
l2_regularizer
)
kernel_regularizer
=
l2_regularizer
,
spp_layer_version
=
decoder_cfg
.
spp_layer_version
,
output_tensor
=
decoder_cfg
.
output_tensor
)
official/vision/beta/modeling/decoders/aspp_test.py
View file @
c57e975a
...
...
@@ -26,14 +26,15 @@ from official.vision.beta.modeling.decoders import aspp
class
ASPPTest
(
parameterized
.
TestCase
,
tf
.
test
.
TestCase
):
@
parameterized
.
parameters
(
(
3
,
[
6
,
12
,
18
,
24
],
128
),
(
3
,
[
6
,
12
,
18
],
128
),
(
3
,
[
6
,
12
],
256
),
(
4
,
[
6
,
12
,
18
,
24
],
128
),
(
4
,
[
6
,
12
,
18
],
128
),
(
4
,
[
6
,
12
],
256
),
(
3
,
[
6
,
12
,
18
,
24
],
128
,
'v1'
),
(
3
,
[
6
,
12
,
18
],
128
,
'v1'
),
(
3
,
[
6
,
12
],
256
,
'v1'
),
(
4
,
[
6
,
12
,
18
,
24
],
128
,
'v2'
),
(
4
,
[
6
,
12
,
18
],
128
,
'v2'
),
(
4
,
[
6
,
12
],
256
,
'v2'
),
)
def
test_network_creation
(
self
,
level
,
dilation_rates
,
num_filters
):
def
test_network_creation
(
self
,
level
,
dilation_rates
,
num_filters
,
spp_layer_version
):
"""Test creation of ASPP."""
input_size
=
256
...
...
@@ -45,7 +46,8 @@ class ASPPTest(parameterized.TestCase, tf.test.TestCase):
network
=
aspp
.
ASPP
(
level
=
level
,
dilation_rates
=
dilation_rates
,
num_filters
=
num_filters
)
num_filters
=
num_filters
,
spp_layer_version
=
spp_layer_version
)
endpoints
=
backbone
(
inputs
)
feats
=
network
(
endpoints
)
...
...
@@ -71,7 +73,8 @@ class ASPPTest(parameterized.TestCase, tf.test.TestCase):
interpolation
=
'bilinear'
,
dropout_rate
=
0.2
,
use_depthwise_convolution
=
'false'
,
)
spp_layer_version
=
'v1'
,
output_tensor
=
False
)
network
=
aspp
.
ASPP
(
**
kwargs
)
expected_config
=
dict
(
kwargs
)
...
...
official/vision/beta/modeling/decoders/nasfpn.py
View file @
c57e975a
...
...
@@ -22,6 +22,7 @@ from absl import logging
import
tensorflow
as
tf
from
official.modeling
import
hyperparams
from
official.modeling
import
tf_utils
from
official.vision.beta.modeling.decoders
import
factory
from
official.vision.beta.ops
import
spatial_transform_ops
...
...
@@ -165,12 +166,7 @@ class NASFPN(tf.keras.Model):
'momentum'
:
self
.
_config_dict
[
'norm_momentum'
],
'epsilon'
:
self
.
_config_dict
[
'norm_epsilon'
],
}
if
activation
==
'relu'
:
self
.
_activation
=
tf
.
nn
.
relu
elif
activation
==
'swish'
:
self
.
_activation
=
tf
.
nn
.
swish
else
:
raise
ValueError
(
'Activation {} not implemented.'
.
format
(
activation
))
self
.
_activation
=
tf_utils
.
get_activation
(
activation
)
# Gets input feature pyramid from backbone.
inputs
=
self
.
_build_input_pyramid
(
input_specs
,
min_level
)
...
...
@@ -238,7 +234,11 @@ class NASFPN(tf.keras.Model):
# dtype mismatch when one input (by default float32 dtype) does not meet all
# the above conditions and is output unchanged, while other inputs are
# processed to have different dtype, e.g., using bfloat16 on TPU.
return
tf
.
cast
(
x
,
dtype
=
tf
.
keras
.
layers
.
Layer
().
dtype_policy
.
compute_dtype
)
compute_dtype
=
tf
.
keras
.
layers
.
Layer
().
dtype_policy
.
compute_dtype
if
(
compute_dtype
is
not
None
)
and
(
x
.
dtype
!=
compute_dtype
):
return
tf
.
cast
(
x
,
dtype
=
compute_dtype
)
else
:
return
x
def
_global_attention
(
self
,
feat0
,
feat1
):
m
=
tf
.
math
.
reduce_max
(
feat0
,
axis
=
[
1
,
2
],
keepdims
=
True
)
...
...
official/vision/beta/modeling/heads/segmentation_heads.py
View file @
c57e975a
...
...
@@ -13,7 +13,7 @@
# limitations under the License.
"""Contains definitions of segmentation heads."""
from
typing
import
List
,
Union
,
Optional
,
Mapping
from
typing
import
List
,
Union
,
Optional
,
Mapping
,
Tuple
import
tensorflow
as
tf
from
official.modeling
import
tf_utils
...
...
@@ -204,16 +204,19 @@ class SegmentationHead(tf.keras.layers.Layer):
super
(
SegmentationHead
,
self
).
build
(
input_shape
)
def
call
(
self
,
backbone_output
:
Mapping
[
str
,
tf
.
Tensor
],
decoder_output
:
Mapping
[
str
,
tf
.
Tensor
]):
def
call
(
self
,
inputs
:
Tuple
[
Union
[
tf
.
Tensor
,
Mapping
[
str
,
tf
.
Tensor
]
]
,
Union
[
tf
.
Tensor
,
Mapping
[
str
,
tf
.
Tensor
]
]]
):
"""Forward pass of the segmentation head.
It supports both a tuple of 2 tensors or 2 dictionaries. The first is
backbone endpoints, and the second is decoder endpoints. When inputs are
tensors, they are from a single level of feature maps. When inputs are
dictionaries, they contain multiple levels of feature maps, where the key
is the index of feature map.
Args:
backbone_output: A `dict` of tensors
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
decoder_output: A `dict` of tensors
inputs: A tuple of 2 feature map tensors of shape
[batch, height_l, width_l, channels] or 2 dictionaries of tensors:
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
...
...
@@ -221,11 +224,14 @@ class SegmentationHead(tf.keras.layers.Layer):
segmentation prediction mask: A `tf.Tensor` of the segmentation mask
scores predicted from input features.
"""
backbone_output
=
inputs
[
0
]
decoder_output
=
inputs
[
1
]
if
self
.
_config_dict
[
'feature_fusion'
]
==
'deeplabv3plus'
:
# deeplabv3+ feature fusion
x
=
decoder_output
[
str
(
self
.
_config_dict
[
'level'
])]
y
=
backbone_output
[
str
(
self
.
_config_dict
[
'low_level'
])]
x
=
decoder_output
[
str
(
self
.
_config_dict
[
'level'
])]
if
isinstance
(
decoder_output
,
dict
)
else
decoder_output
y
=
backbone_output
[
str
(
self
.
_config_dict
[
'low_level'
])]
if
isinstance
(
backbone_output
,
dict
)
else
backbone_output
y
=
self
.
_dlv3p_norm
(
self
.
_dlv3p_conv
(
y
))
y
=
self
.
_activation
(
y
)
...
...
@@ -234,12 +240,15 @@ class SegmentationHead(tf.keras.layers.Layer):
x
=
tf
.
cast
(
x
,
dtype
=
y
.
dtype
)
x
=
tf
.
concat
([
x
,
y
],
axis
=
self
.
_bn_axis
)
elif
self
.
_config_dict
[
'feature_fusion'
]
==
'pyramid_fusion'
:
if
not
isinstance
(
decoder_output
,
dict
):
raise
ValueError
(
'Only support dictionary decoder_output.'
)
x
=
nn_layers
.
pyramid_feature_fusion
(
decoder_output
,
self
.
_config_dict
[
'level'
])
elif
self
.
_config_dict
[
'feature_fusion'
]
==
'panoptic_fpn_fusion'
:
x
=
self
.
_panoptic_fpn_fusion
(
decoder_output
)
else
:
x
=
decoder_output
[
str
(
self
.
_config_dict
[
'level'
])]
x
=
decoder_output
[
str
(
self
.
_config_dict
[
'level'
])]
if
isinstance
(
decoder_output
,
dict
)
else
decoder_output
for
conv
,
norm
in
zip
(
self
.
_convs
,
self
.
_norms
):
x
=
conv
(
x
)
...
...
official/vision/beta/modeling/heads/segmentation_heads_test.py
View file @
c57e975a
...
...
@@ -58,7 +58,7 @@ class SegmentationHeadTest(parameterized.TestCase, tf.test.TestCase):
decoder_max_level
=
decoder_max_level
,
num_decoder_filters
=
64
)
logits
=
head
(
backbone_features
,
decoder_features
)
logits
=
head
(
(
backbone_features
,
decoder_features
)
)
if
level
in
decoder_features
:
self
.
assertAllEqual
(
logits
.
numpy
().
shape
,
[
...
...
official/vision/beta/modeling/layers/deeplab.py
View file @
c57e975a
...
...
@@ -17,7 +17,6 @@
import
tensorflow
as
tf
@
tf
.
keras
.
utils
.
register_keras_serializable
(
package
=
'keras_cv'
)
class
SpatialPyramidPooling
(
tf
.
keras
.
layers
.
Layer
):
"""Implements the Atrous Spatial Pyramid Pooling.
...
...
official/vision/beta/modeling/layers/detection_generator.py
View file @
c57e975a
...
...
@@ -368,7 +368,7 @@ def _generate_detections_v2(boxes: tf.Tensor,
nmsed_boxes
=
tf
.
gather
(
nmsed_boxes
,
indices
,
batch_dims
=
1
,
axis
=
1
)
nmsed_classes
=
tf
.
gather
(
nmsed_classes
,
indices
,
batch_dims
=
1
)
valid_detections
=
tf
.
reduce_sum
(
input_tensor
=
tf
.
cast
(
tf
.
greater
(
nmsed_scores
,
-
1
),
tf
.
int32
),
axis
=
1
)
input_tensor
=
tf
.
cast
(
tf
.
greater
(
nmsed_scores
,
0.0
),
tf
.
int32
),
axis
=
1
)
return
nmsed_boxes
,
nmsed_scores
,
nmsed_classes
,
valid_detections
...
...
Prev
1
…
5
6
7
8
9
10
11
12
13
…
15
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment