ModelZoo / ResNet50_tensorflow

Commit c57e975a, authored Nov 29, 2021 by saberkun

Merge pull request #10338 from srihari-humbarwadi:readme

PiperOrigin-RevId: 413033276

Parents: 7fb4f3cd, acf4156e
Showing 20 changed files with 65 additions and 45 deletions.
official/vision/beta/serving/semantic_segmentation_test.py (+1, -2)
official/vision/beta/tasks/maskrcnn.py (+3, -1)
official/vision/beta/tasks/retinanet.py (+11, -9)
official/vision/detection/dataloader/anchor.py (+8, -8)
official/vision/detection/dataloader/input_reader.py (+0, -1)
official/vision/detection/executor/detection_executor.py (+1, -2)
official/vision/detection/executor/distributed_executor.py (+0, -1)
official/vision/detection/modeling/architecture/nn_blocks.py (+0, -2)
official/vision/detection/modeling/architecture/spinenet.py (+0, -1)
official/vision/detection/ops/target_ops.py (+1, -1)
official/vision/detection/utils/box_utils.py (+0, -1)
official/vision/detection/utils/input_utils.py (+1, -1)
official/vision/image_classification/augment.py (+0, -1)
official/vision/image_classification/augment_test.py (+0, -1)
official/vision/image_classification/callbacks.py (+0, -1)
official/vision/image_classification/configs/base_configs.py (+37, -7)
official/vision/image_classification/configs/configs.py (+2, -2)
official/vision/image_classification/dataset_factory.py (+0, -1)
official/vision/image_classification/efficientnet/common_modules.py (+0, -1)
official/vision/image_classification/efficientnet/tfhub_export.py (+0, -1)
official/vision/beta/serving/semantic_segmentation_test.py

@@ -31,8 +31,7 @@ from official.vision.beta.serving import semantic_segmentation
 class SemanticSegmentationExportTest(tf.test.TestCase, parameterized.TestCase):

   def _get_segmentation_module(self):
-    params = exp_factory.get_exp_config('seg_deeplabv3_pascal')
-    params.task.model.backbone.dilated_resnet.model_id = 50
+    params = exp_factory.get_exp_config('mnv2_deeplabv3_pascal')
     segmentation_module = semantic_segmentation.SegmentationModule(
         params, batch_size=1, input_image_size=[112, 112])
     return segmentation_module
official/vision/beta/tasks/maskrcnn.py

@@ -275,7 +275,9 @@ class MaskRCNNTask(base_task.Task):
             self._task_config.validation_data.input_path,
             self._task_config.validation_data.file_type,
             self._task_config.validation_data.num_examples,
-            self.task_config.model.include_mask, annotation_path)
+            self.task_config.model.include_mask, annotation_path,
+            regenerate_source_id=self._task_config.validation_data.decoder
+            .simple_decoder.regenerate_source_id)
       self.coco_metric = coco_evaluator.COCOEvaluator(
           annotation_file=annotation_path,
           include_mask=self._task_config.model.include_mask,
official/vision/beta/tasks/retinanet.py

@@ -13,14 +13,14 @@
 # limitations under the License.

 """RetinaNet task definition."""
-from typing import Any, Optional, List, Tuple, Mapping
+from typing import Any, List, Mapping, Optional, Tuple
 from absl import logging
 import tensorflow as tf
 from official.common import dataset_fn
 from official.core import base_task
 from official.core import task_factory
-from official.vision import keras_cv
 from official.vision.beta.configs import retinanet as exp_cfg
 from official.vision.beta.dataloaders import input_reader_factory
 from official.vision.beta.dataloaders import retinanet_input

@@ -28,6 +28,8 @@ from official.vision.beta.dataloaders import tf_example_decoder
 from official.vision.beta.dataloaders import tfds_factory
 from official.vision.beta.dataloaders import tf_example_label_map_decoder
 from official.vision.beta.evaluation import coco_evaluator
+from official.vision.beta.losses import focal_loss
+from official.vision.beta.losses import loss_utils
 from official.vision.beta.modeling import factory

@@ -155,9 +157,9 @@ class RetinaNetTask(base_task.Task):
         if head.name not in outputs['attribute_outputs']:
           raise ValueError(
               f'Attribute {head.name} not found in model outputs.')
-        y_true_att = keras_cv.losses.multi_level_flatten(
+        y_true_att = loss_utils.multi_level_flatten(
             labels['attribute_targets'][head.name], last_dim=head.size)
-        y_pred_att = keras_cv.losses.multi_level_flatten(
+        y_pred_att = loss_utils.multi_level_flatten(
            outputs['attribute_outputs'][head.name], last_dim=head.size)
         if head.type == 'regression':
           att_loss_fn = tf.keras.losses.Huber(

@@ -180,7 +182,7 @@ class RetinaNetTask(base_task.Task):
     params = self.task_config
     attribute_heads = self.task_config.model.head.attribute_heads

-    cls_loss_fn = keras_cv.losses.FocalLoss(
+    cls_loss_fn = focal_loss.FocalLoss(
         alpha=params.losses.focal_loss_alpha,
         gamma=params.losses.focal_loss_gamma,
         reduction=tf.keras.losses.Reduction.SUM)

@@ -194,14 +196,14 @@ class RetinaNetTask(base_task.Task):
     num_positives = tf.reduce_sum(box_sample_weight) + 1.0
     cls_sample_weight = cls_sample_weight / num_positives
     box_sample_weight = box_sample_weight / num_positives
-    y_true_cls = keras_cv.losses.multi_level_flatten(
+    y_true_cls = loss_utils.multi_level_flatten(
        labels['cls_targets'], last_dim=None)
     y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes)
-    y_pred_cls = keras_cv.losses.multi_level_flatten(
+    y_pred_cls = loss_utils.multi_level_flatten(
        outputs['cls_outputs'], last_dim=params.model.num_classes)
-    y_true_box = keras_cv.losses.multi_level_flatten(
+    y_true_box = loss_utils.multi_level_flatten(
        labels['box_targets'], last_dim=4)
-    y_pred_box = keras_cv.losses.multi_level_flatten(
+    y_pred_box = loss_utils.multi_level_flatten(
        outputs['box_outputs'], last_dim=4)

     cls_loss = cls_loss_fn(
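For reference, a minimal usage sketch of the replacement modules this diff migrates retinanet.py to (focal_loss.FocalLoss and loss_utils.multi_level_flatten from official/vision/beta/losses, as imported above). It is not part of the commit: the tensor shapes, two-class setup, and alpha/gamma values are toy placeholders chosen only to exercise the calls shown in the hunks.

# Sketch only: post-migration call pattern; all values below are placeholders.
import tensorflow as tf

from official.vision.beta.losses import focal_loss
from official.vision.beta.losses import loss_utils

num_classes = 2
# Per-level targets [batch, h, w, anchors] and logits [batch, h, w, anchors * num_classes].
labels = {'3': tf.zeros([1, 4, 4, 1], tf.int32), '4': tf.zeros([1, 2, 2, 1], tf.int32)}
outputs = {'3': tf.zeros([1, 4, 4, num_classes]), '4': tf.zeros([1, 2, 2, num_classes])}

y_true = loss_utils.multi_level_flatten(labels, last_dim=None)          # [1, 20]
y_true = tf.one_hot(y_true, num_classes)                                # [1, 20, 2]
y_pred = loss_utils.multi_level_flatten(outputs, last_dim=num_classes)  # [1, 20, 2]

cls_loss_fn = focal_loss.FocalLoss(
    alpha=0.25, gamma=1.5, reduction=tf.keras.losses.Reduction.SUM)
cls_loss = cls_loss_fn(y_true, y_pred)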
official/vision/detection/dataloader/anchor.py

@@ -21,13 +21,13 @@ from __future__ import print_function
 import collections
 import tensorflow as tf
-from official.vision import keras_cv
+from official.vision.beta.ops import iou_similarity
 from official.vision.detection.utils import box_utils
-from official.vision.detection.utils.object_detection import argmax_matcher
-from official.vision.detection.utils.object_detection import balanced_positive_negative_sampler
-from official.vision.detection.utils.object_detection import box_list
-from official.vision.detection.utils.object_detection import faster_rcnn_box_coder
-from official.vision.detection.utils.object_detection import target_assigner
+from official.vision.utils.object_detection import argmax_matcher
+from official.vision.utils.object_detection import balanced_positive_negative_sampler
+from official.vision.utils.object_detection import box_list
+from official.vision.utils.object_detection import faster_rcnn_box_coder
+from official.vision.utils.object_detection import target_assigner


 class Anchor(object):

@@ -135,7 +135,7 @@ class AnchorLabeler(object):
         upper-bound threshold to assign negative labels for anchors. An anchor
         with a score below the threshold is labeled negative.
     """
-    similarity_calc = keras_cv.ops.IouSimilarity()
+    similarity_calc = iou_similarity.IouSimilarity()
     matcher = argmax_matcher.ArgMaxMatcher(
         match_threshold,
         unmatched_threshold=unmatched_threshold,

@@ -341,7 +341,7 @@ class OlnAnchorLabeler(RpnAnchorLabeler):
         unmatched_threshold=unmatched_threshold,
         rpn_batch_size_per_im=rpn_batch_size_per_im,
         rpn_fg_fraction=rpn_fg_fraction)
-    similarity_calc = keras_cv.ops.IouSimilarity()
+    similarity_calc = iou_similarity.IouSimilarity()
     matcher = argmax_matcher.ArgMaxMatcher(
         match_threshold,
         unmatched_threshold=unmatched_threshold,
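A minimal sketch of the replacement matcher setup used twice in anchor.py above. The import paths are the new ones introduced in this hunk; the threshold values are placeholders, not values from the commit.

# Sketch only: IoU similarity plus argmax matching, as wired up in anchor.py.
from official.vision.beta.ops import iou_similarity
from official.vision.utils.object_detection import argmax_matcher

similarity_calc = iou_similarity.IouSimilarity()  # replaces keras_cv.ops.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
    0.5,                          # match_threshold (placeholder)
    unmatched_threshold=0.4)      # placeholder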
official/vision/detection/dataloader/input_reader.py

@@ -16,7 +16,6 @@
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import tensorflow as tf
official/vision/detection/executor/detection_executor.py

@@ -16,14 +16,13 @@
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 from absl import logging
 import tensorflow as tf

 from official.vision.detection.executor import distributed_executor as executor
-from official.vision.detection.utils.object_detection import visualization_utils
+from official.vision.utils.object_detection import visualization_utils


 class DetectionDistributedExecutor(executor.DistributedExecutor):
official/vision/detection/executor/distributed_executor.py

@@ -16,7 +16,6 @@
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import os
official/vision/detection/modeling/architecture/nn_blocks.py

@@ -23,7 +23,6 @@ import tensorflow as tf
 from official.modeling import tf_utils


-
 @tf.keras.utils.register_keras_serializable(package='Vision')
 class ResidualBlock(tf.keras.layers.Layer):
   """A residual block."""

@@ -163,7 +162,6 @@ class ResidualBlock(tf.keras.layers.Layer):
     return self._activation_fn(x + shortcut)


-
 @tf.keras.utils.register_keras_serializable(package='Vision')
 class BottleneckBlock(tf.keras.layers.Layer):
   """A standard bottleneck block."""
official/vision/detection/modeling/architecture/spinenet.py

@@ -113,7 +113,6 @@ def build_block_specs(block_specs=None):
   return [BlockSpec(*b) for b in block_specs]


-
 @tf.keras.utils.register_keras_serializable(package='Vision')
 class SpineNet(tf.keras.Model):
   """Class to build SpineNet models."""
official/vision/detection/ops/target_ops.py

@@ -22,7 +22,7 @@ import tensorflow as tf
 from official.vision.detection.ops import spatial_transform_ops
 from official.vision.detection.utils import box_utils
-from official.vision.detection.utils.object_detection import balanced_positive_negative_sampler
+from official.vision.utils.object_detection import balanced_positive_negative_sampler


 def box_matching(boxes, gt_boxes, gt_classes):
official/vision/detection/utils/box_utils.py

@@ -16,7 +16,6 @@
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import numpy as np
official/vision/detection/utils/input_utils.py

@@ -19,7 +19,7 @@ import math
 import tensorflow as tf

 from official.vision.detection.utils import box_utils
-from official.vision.detection.utils.object_detection import preprocessor
+from official.vision.utils.object_detection import preprocessor


 def pad_to_fixed_size(input_tensor, size, constant_values=0):
official/vision/image_classification/augment.py

@@ -20,7 +20,6 @@ RandAugment Reference: https://arxiv.org/abs/1909.13719
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import math
official/vision/image_classification/augment_test.py

@@ -16,7 +16,6 @@
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 from absl.testing import parameterized
official/vision/image_classification/callbacks.py

@@ -16,7 +16,6 @@
 """Common modules for callbacks."""
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import os
official/vision/image_classification/configs/base_configs.py

@@ -14,19 +14,49 @@
 # Lint as: python3
 """Definitions for high level configuration groups.."""
-from typing import Any, List, Mapping, Optional
-
 import dataclasses
+from typing import Any, List, Mapping, Optional

 from official.core import config_definitions
 from official.modeling import hyperparams
-from official.modeling.hyperparams import config_definitions as legacy_cfg

-CallbacksConfig = legacy_cfg.CallbacksConfig
-TensorboardConfig = legacy_cfg.TensorboardConfig
 RuntimeConfig = config_definitions.RuntimeConfig


+@dataclasses.dataclass
+class TensorBoardConfig(hyperparams.Config):
+  """Configuration for TensorBoard.
+
+  Attributes:
+    track_lr: Whether or not to track the learning rate in TensorBoard. Defaults
+      to True.
+    write_model_weights: Whether or not to write the model weights as images in
+      TensorBoard. Defaults to False.
+  """
+  track_lr: bool = True
+  write_model_weights: bool = False
+
+
+@dataclasses.dataclass
+class CallbacksConfig(hyperparams.Config):
+  """Configuration for Callbacks.
+
+  Attributes:
+    enable_checkpoint_and_export: Whether or not to enable checkpoints as a
+      Callback. Defaults to True.
+    enable_backup_and_restore: Whether or not to add BackupAndRestore
+      callback. Defaults to True.
+    enable_tensorboard: Whether or not to enable TensorBoard as a Callback.
+      Defaults to True.
+    enable_time_history: Whether or not to enable TimeHistory Callbacks.
+      Defaults to True.
+  """
+  enable_checkpoint_and_export: bool = True
+  enable_backup_and_restore: bool = False
+  enable_tensorboard: bool = True
+  enable_time_history: bool = True
+
+
 @dataclasses.dataclass
 class ExportConfig(hyperparams.Config):
   """Configuration for exports.

@@ -74,7 +104,7 @@ class TrainConfig(hyperparams.Config):
     inferred based on the number of images and batch size. Defaults to None.
     callbacks: An instance of CallbacksConfig.
     metrics: An instance of MetricsConfig.
-    tensorboard: An instance of TensorboardConfig.
+    tensorboard: An instance of TensorBoardConfig.
     set_epoch_loop: Whether or not to set `steps_per_execution` to
       equal the number of training steps in `model.compile`. This reduces the
       number of callbacks run per epoch which significantly improves end-to-end

@@ -85,7 +115,7 @@ class TrainConfig(hyperparams.Config):
   steps: int = None
   callbacks: CallbacksConfig = CallbacksConfig()
   metrics: MetricsConfig = None
-  tensorboard: TensorboardConfig = TensorboardConfig()
+  tensorboard: TensorBoardConfig = TensorBoardConfig()
   time_history: TimeHistoryConfig = TimeHistoryConfig()
   set_epoch_loop: bool = False
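For context, a small usage sketch of the config classes defined above. It mirrors the call sites updated in configs.py below and only uses fields present in this diff; the values shown are the dataclass defaults.

# Sketch only: constructing the renamed TensorBoardConfig (note the capital B,
# replacing the old legacy_cfg.TensorboardConfig alias) and CallbacksConfig.
from official.vision.image_classification.configs import base_configs

tensorboard = base_configs.TensorBoardConfig(track_lr=True, write_model_weights=False)
callbacks = base_configs.CallbacksConfig(
    enable_checkpoint_and_export=True,
    enable_backup_and_restore=False,
    enable_tensorboard=True,
    enable_time_history=True)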
official/vision/image_classification/configs/configs.py

@@ -52,7 +52,7 @@ class EfficientNetImageNetConfig(base_configs.ExperimentConfig):
           enable_checkpoint_and_export=True, enable_tensorboard=True),
       metrics=['accuracy', 'top_5'],
       time_history=base_configs.TimeHistoryConfig(log_steps=100),
-      tensorboard=base_configs.TensorboardConfig(
+      tensorboard=base_configs.TensorBoardConfig(
           track_lr=True, write_model_weights=False),
       set_epoch_loop=False)
   evaluation: base_configs.EvalConfig = base_configs.EvalConfig(

@@ -84,7 +84,7 @@ class ResNetImagenetConfig(base_configs.ExperimentConfig):
           enable_checkpoint_and_export=True, enable_tensorboard=True),
       metrics=['accuracy', 'top_5'],
       time_history=base_configs.TimeHistoryConfig(log_steps=100),
-      tensorboard=base_configs.TensorboardConfig(
+      tensorboard=base_configs.TensorBoardConfig(
           track_lr=True, write_model_weights=False),
       set_epoch_loop=False)
   evaluation: base_configs.EvalConfig = base_configs.EvalConfig(
official/vision/image_classification/dataset_factory.py

@@ -16,7 +16,6 @@
 """Dataset utilities for vision tasks using TFDS and tf.data.Dataset."""
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import os
official/vision/image_classification/efficientnet/common_modules.py

@@ -15,7 +15,6 @@
 """Common modeling utilities."""
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import numpy as np
official/vision/image_classification/efficientnet/tfhub_export.py

@@ -16,7 +16,6 @@
 from __future__ import absolute_import
 from __future__ import division
-# from __future__ import google_type_annotations
 from __future__ import print_function

 import os