ModelZoo / ResNet50_tensorflow / Commits / b92025a9

Commit b92025a9, authored Aug 18, 2021 by anivegesana
Merge branch 'master' of https://github.com/tensorflow/models into detection_generator_pr_2
Parents: 1b425791, 37536370

Changes: 108. Showing 20 changed files with 191 additions and 307 deletions (+191 / -307):
- official/vision/beta/projects/simclr/heads/simclr_head.py (+1, -15)
- official/vision/beta/projects/simclr/heads/simclr_head_test.py (+0, -16)
- official/vision/beta/projects/simclr/losses/contrastive_losses.py (+0, -15)
- official/vision/beta/projects/simclr/losses/contrastive_losses_test.py (+0, -16)
- official/vision/beta/projects/simclr/modeling/layers/nn_blocks.py (+0, -16)
- official/vision/beta/projects/simclr/modeling/layers/nn_blocks_test.py (+0, -16)
- official/vision/beta/projects/simclr/modeling/multitask_model.py (+1, -1)
- official/vision/beta/projects/simclr/modeling/multitask_model_test.py (+6, -4)
- official/vision/beta/projects/simclr/modeling/simclr_model.py (+5, -20)
- official/vision/beta/projects/simclr/modeling/simclr_model_test.py (+1, -16)
- official/vision/beta/projects/simclr/tasks/simclr.py (+48, -58)
- official/vision/beta/projects/simclr/train.py (+1, -16)
- official/vision/beta/projects/yolo/configs/darknet_classification.py (+1, -2)
- official/vision/beta/projects/yolo/modeling/yolo_model.py (+70, -59)
- official/vision/beta/projects/yolo/ops/box_ops.py (+34, -23)
- official/vision/beta/projects/yolo/ops/box_ops_test.py (+1, -0)
- official/vision/beta/projects/yolo/ops/math_ops.py (+21, -5)
- official/vision/beta/projects/yolo/ops/preprocess_ops_test.py (+1, -0)
- official/vision/beta/serving/detection.py (+0, -2)
- official/vision/beta/serving/detection_test.py (+0, -7)
official/vision/beta/projects/simclr/heads/simclr_head.py
@@ -12,21 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Dense prediction heads."""
+"""SimCLR prediction heads."""
 from typing import Text, Optional
official/vision/beta/projects/simclr/heads/simclr_head_test.py
@@ -12,22 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
 from absl.testing import parameterized
 import numpy as np
official/vision/beta/projects/simclr/losses/contrastive_losses.py
@@ -12,21 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
 """Contrastive loss functions."""
 import functools
official/vision/beta/projects/simclr/losses/contrastive_losses_test.py
@@ -12,22 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
 from absl.testing import parameterized
 import numpy as np
official/vision/beta/projects/simclr/modeling/layers/nn_blocks.py
@@ -12,22 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
 """Contains common building blocks for simclr neural networks."""
 from typing import Text, Optional
official/vision/beta/projects/simclr/modeling/layers/nn_blocks_test.py
@@ -12,22 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
 from absl.testing import parameterized
 import tensorflow as tf
official/vision/beta/projects/simclr/modeling/multitask_model.py
@@ -84,7 +84,7 @@ class SimCLRMTModel(base_model.MultiTaskBaseModel):
       else:
         supervised_head = None
-      tasks[model_config.mode] = simclr_model.SimCLRModel(
+      tasks[model_config.task_name] = simclr_model.SimCLRModel(
          input_specs=self._input_specs,
          backbone=self._backbone,
          projection_head=projection_head,
official/vision/beta/projects/simclr/modeling/multitask_model_test.py
@@ -29,11 +29,13 @@ class MultitaskModelTest(tf.test.TestCase):
     ckpt_dir = self.get_temp_dir()
     config = multitask_config.SimCLRMTModelConfig(
         input_size=[64, 64, 3],
-        heads=(multitask_config.SimCLRMTHeadConfig(mode=simclr_model.PRETRAIN),
-               multitask_config.SimCLRMTHeadConfig(mode=simclr_model.FINETUNE)))
+        heads=(multitask_config.SimCLRMTHeadConfig(
+            mode=simclr_model.PRETRAIN, task_name='pretrain_simclr'),
+               multitask_config.SimCLRMTHeadConfig(
+                   mode=simclr_model.FINETUNE, task_name='finetune_simclr')))
     model = multitask_model.SimCLRMTModel(config)
-    self.assertIn(simclr_model.PRETRAIN, model.sub_tasks)
-    self.assertIn(simclr_model.FINETUNE, model.sub_tasks)
+    self.assertIn('pretrain_simclr', model.sub_tasks)
+    self.assertIn('finetune_simclr', model.sub_tasks)
     ckpt = tf.train.Checkpoint(backbone=model._backbone)
     ckpt.save(os.path.join(ckpt_dir, 'ckpt'))
     model.initialize()
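An aside on the intent of the `task_name` change above: keying the sub-task dictionary by mode means two heads that share a mode would overwrite each other, while task names stay unique. A minimal sketch with plain dicts (hypothetical stand-ins, not the Model Garden `SimCLRMTHeadConfig` API):

# Hypothetical head configs; only the dictionary-keying behaviour is the point.
heads = [
    {"mode": "pretrain", "task_name": "pretrain_simclr"},
    {"mode": "pretrain", "task_name": "pretrain_simclr_aug2"},
]

by_mode = {h["mode"]: h for h in heads}       # second entry overwrites the first
by_name = {h["task_name"]: h for h in heads}  # both entries survive

assert len(by_mode) == 1
assert len(by_name) == 2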
official/vision/beta/projects/simclr/modeling/simclr_model.py
@@ -12,22 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
 """Build simclr models."""
 from typing import Optional

 from absl import logging
@@ -133,12 +118,12 @@ class SimCLRModel(tf.keras.Model):
   def checkpoint_items(self):
     """Returns a dictionary of items to be additionally checkpointed."""
     if self._supervised_head is not None:
       items = dict(
           backbone=self.backbone,
           projection_head=self.projection_head,
           supervised_head=self.supervised_head)
     else:
       items = dict(
           backbone=self.backbone, projection_head=self.projection_head)
     return items

   @property
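For context, a sketch of how a `checkpoint_items` mapping like the one above is typically consumed (an assumed usage pattern, not code from this commit): the dictionary is splatted into `tf.train.Checkpoint`, so each component gets its own named slot in the saved checkpoint.

import os
import tempfile

import tensorflow as tf

# Hypothetical stand-ins for the SimCLR components; only the pattern matters.
backbone = tf.Variable([1.0, 2.0])
projection_head = tf.Variable([3.0])
items = dict(backbone=backbone, projection_head=projection_head)

# Each key in checkpoint_items becomes a named slot in the saved checkpoint,
# which is what later allows selective restores such as backbone-only loading.
path = tf.train.Checkpoint(**items).save(os.path.join(tempfile.mkdtemp(), 'ckpt'))
for name, shape in tf.train.list_variables(path):
  print(name, shape)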
official/vision/beta/projects/simclr/modeling/simclr_model_test.py
@@ -12,22 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
+"""Test for SimCLR model."""
 from absl.testing import parameterized
 import numpy as np
official/vision/beta/projects/simclr/tasks/simclr.py
@@ -12,21 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
 """Image SimCLR task definition.

 SimCLR training two different modes:
@@ -39,7 +24,6 @@ the task definition:
 - training loss
 - projection_head and/or supervised_head
 """
 from typing import Dict, Optional

 from absl import logging
@@ -67,7 +51,8 @@ RuntimeConfig = config_definitions.RuntimeConfig
 class SimCLRPretrainTask(base_task.Task):
   """A task for image classification."""

   def create_optimizer(self,
                        optimizer_config: OptimizationConfig,
                        runtime_config: Optional[RuntimeConfig] = None):
     """Creates an TF optimizer from configurations.
@@ -78,8 +63,8 @@ class SimCLRPretrainTask(base_task.Task):
     Returns:
       A tf.optimizers.Optimizer object.
     """
     if (optimizer_config.optimizer.type == 'lars' and
         self.task_config.loss.l2_weight_decay > 0.0):
       raise ValueError('The l2_weight_decay cannot be used together with lars '
                        'optimizer. Please set it to 0.')
@@ -97,15 +82,16 @@ class SimCLRPretrainTask(base_task.Task):
   def build_model(self):
     model_config = self.task_config.model
     input_specs = tf.keras.layers.InputSpec(
         shape=[None] + model_config.input_size)

     l2_weight_decay = self.task_config.loss.l2_weight_decay
     # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
     # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
     # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
     l2_regularizer = (
         tf.keras.regularizers.l2(l2_weight_decay / 2.0)
         if l2_weight_decay else None)

     # Build backbone
     backbone = backbones.factory.build_backbone(
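The "divide weight decay by 2.0" comment is easy to gloss over: `tf.nn.l2_loss(w)` is `sum(w**2) / 2`, while `tf.keras.regularizers.l2(l)(w)` is `l * sum(w**2)`, so passing `l2_weight_decay / 2.0` makes the Keras regularizer equal to `l2_weight_decay * tf.nn.l2_loss(w)`. A small standalone check (not part of the diff):

import tensorflow as tf

w = tf.constant([[1.0, -2.0], [3.0, 0.5]])
l2_weight_decay = 1e-4

keras_penalty = tf.keras.regularizers.l2(l2_weight_decay / 2.0)(w)
nn_penalty = l2_weight_decay * tf.nn.l2_loss(w)  # sum(w**2) / 2

# Both formulations produce the same regularization term.
tf.debugging.assert_near(keras_penalty, nn_penalty)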
@@ -220,8 +206,7 @@ class SimCLRPretrainTask(base_task.Task):
     projection_outputs = model_outputs[simclr_model.PROJECTION_OUTPUT_KEY]
     projection1, projection2 = tf.split(projection_outputs, 2, 0)
     contrast_loss, (contrast_logits, contrast_labels) = con_losses_obj(
         projection1=projection1, projection2=projection2)
     contrast_accuracy = tf.equal(
         tf.argmax(contrast_labels, axis=1), tf.argmax(contrast_logits, axis=1))
@@ -253,8 +238,8 @@ class SimCLRPretrainTask(base_task.Task):
         outputs)
     sup_loss = tf.reduce_mean(sup_loss)

     label_acc = tf.equal(
         tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1))
     label_acc = tf.reduce_mean(tf.cast(label_acc, tf.float32))
     model_loss = contrast_loss + sup_loss
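The accuracy bookkeeping in these hunks follows a common argmax-compare-and-average pattern. A tiny standalone illustration with toy tensors (not data from the task):

import tensorflow as tf

labels = tf.constant([[0., 1., 0.], [1., 0., 0.]])          # one-hot labels
outputs = tf.constant([[0.1, 2.0, 0.3], [0.2, 1.5, 0.1]])   # logits

# Compare predicted class indices against the label indices, then average.
label_acc = tf.equal(tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1))
label_acc = tf.reduce_mean(tf.cast(label_acc, tf.float32))

print(label_acc.numpy())  # 0.5: the first example is correct, the second is not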
@@ -278,10 +263,7 @@ class SimCLRPretrainTask(base_task.Task):
     if training:
       metrics = []
       metric_names = [
           'total_loss', 'contrast_loss', 'contrast_accuracy', 'contrast_entropy'
       ]
       if self.task_config.model.supervised_head:
         metric_names.extend(['supervised_loss', 'accuracy'])
@@ -293,18 +275,26 @@ class SimCLRPretrainTask(base_task.Task):
         metrics = [
             tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
             tf.keras.metrics.TopKCategoricalAccuracy(
                 k=k, name='top_{}_accuracy'.format(k))
         ]
       else:
         metrics = [
             tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
             tf.keras.metrics.SparseTopKCategoricalAccuracy(
                 k=k, name='top_{}_accuracy'.format(k))
         ]
     return metrics

   def train_step(self, inputs, model, optimizer, metrics=None):
     features, labels = inputs
+
+    # To do a sanity check that we absolutely use no labels when pretraining, we
+    # can set the labels here to zero.
+    if self.task_config.train_data.input_set_label_to_zero:
+      labels *= 0
+
     if (self.task_config.model.supervised_head is not None and
         self.task_config.evaluation.one_hot):
       num_classes = self.task_config.model.supervised_head.num_classes
       labels = tf.one_hot(labels, num_classes)
@@ -313,8 +303,7 @@ class SimCLRPretrainTask(base_task.Task):
       outputs = model(features, training=True)
       # Casting output layer as float32 is necessary when mixed_precision is
       # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
       outputs = tf.nest.map_structure(
           lambda x: tf.cast(x, tf.float32), outputs)

       # Computes per-replica loss.
       losses = self.build_losses(
@@ -373,7 +362,8 @@ class SimCLRPretrainTask(base_task.Task):
 class SimCLRFinetuneTask(base_task.Task):
   """A task for image classification."""

   def create_optimizer(self,
                        optimizer_config: OptimizationConfig,
                        runtime_config: Optional[RuntimeConfig] = None):
     """Creates an TF optimizer from configurations.
@@ -384,8 +374,8 @@ class SimCLRFinetuneTask(base_task.Task):
     Returns:
       A tf.optimizers.Optimizer object.
     """
     if (optimizer_config.optimizer.type == 'lars' and
         self.task_config.loss.l2_weight_decay > 0.0):
       raise ValueError('The l2_weight_decay cannot be used together with lars '
                        'optimizer. Please set it to 0.')
@@ -403,15 +393,16 @@ class SimCLRFinetuneTask(base_task.Task):
   def build_model(self):
     model_config = self.task_config.model
     input_specs = tf.keras.layers.InputSpec(
         shape=[None] + model_config.input_size)

     l2_weight_decay = self.task_config.loss.l2_weight_decay
     # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
     # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
     # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
     l2_regularizer = (
         tf.keras.regularizers.l2(l2_weight_decay / 2.0)
         if l2_weight_decay else None)

     backbone = backbones.factory.build_backbone(
         input_specs=input_specs,
@@ -467,8 +458,8 @@ class SimCLRFinetuneTask(base_task.Task):
       status = ckpt.restore(ckpt_dir_or_file)
       status.assert_consumed()
     elif self.task_config.init_checkpoint_modules == 'backbone_projection':
       ckpt = tf.train.Checkpoint(
           backbone=model.backbone, projection_head=model.projection_head)
       status = ckpt.restore(ckpt_dir_or_file)
       status.expect_partial().assert_existing_objects_matched()
     elif self.task_config.init_checkpoint_modules == 'backbone':
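The `expect_partial().assert_existing_objects_matched()` idiom above restores a subset of a model from a full checkpoint: warnings about deliberately unloaded checkpoint entries are silenced, while everything in the restoring object is still verified to have found a match. A minimal sketch with hypothetical variables standing in for the SimCLR modules:

import os
import tempfile

import tensorflow as tf

# Hypothetical components standing in for backbone / projection_head.
backbone = tf.Variable([1.0, 2.0])
projection_head = tf.Variable([3.0])
full_ckpt = tf.train.Checkpoint(backbone=backbone, projection_head=projection_head)
path = full_ckpt.save(os.path.join(tempfile.mkdtemp(), 'ckpt'))

# Restore only the backbone from the full checkpoint.
backbone_only = tf.Variable([0.0, 0.0])
status = tf.train.Checkpoint(backbone=backbone_only).restore(path)
status.expect_partial().assert_existing_objects_matched()
print(backbone_only.numpy())  # [1. 2.]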
@@ -542,12 +533,14 @@ class SimCLRFinetuneTask(base_task.Task):
       metrics = [
           tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
           tf.keras.metrics.TopKCategoricalAccuracy(
               k=k, name='top_{}_accuracy'.format(k))
       ]
     else:
       metrics = [
           tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
           tf.keras.metrics.SparseTopKCategoricalAccuracy(
               k=k, name='top_{}_accuracy'.format(k))
       ]
     return metrics

   def train_step(self, inputs, model, optimizer, metrics=None):
@@ -577,16 +570,14 @@ class SimCLRFinetuneTask(base_task.Task):
       # Computes per-replica loss.
       loss = self.build_losses(
           model_outputs=outputs, labels=labels, aux_losses=model.losses)
       # Scales loss as the default gradients allreduce performs sum inside the
       # optimizer.
       scaled_loss = loss / num_replicas

       # For mixed_precision policy, when LossScaleOptimizer is used, loss is
       # scaled for numerical stability.
       if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
         scaled_loss = optimizer.get_scaled_loss(scaled_loss)

     tvars = model.trainable_variables
@@ -596,8 +587,7 @@ class SimCLRFinetuneTask(base_task.Task):
     grads = tape.gradient(scaled_loss, tvars)
     # Scales back gradient before apply_gradients when LossScaleOptimizer is
     # used.
     if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
       grads = optimizer.get_unscaled_gradients(grads)
     optimizer.apply_gradients(list(zip(grads, tvars)))
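The scale/unscale pair in the last two hunks is the standard mixed-precision recipe: scale the loss before the backward pass so float16 gradients do not underflow, then unscale the gradients before `apply_gradients` so step sizes are unchanged. A condensed sketch of that pattern, with a toy model standing in for the SimCLR task:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(tf.keras.optimizers.SGD(0.1))

x = tf.ones([4, 3])
y = tf.ones([4, 1])

with tf.GradientTape() as tape:
  loss = tf.reduce_mean(tf.square(model(x) - y))
  # Scale the loss so small float16 gradients survive the backward pass.
  scaled_loss = optimizer.get_scaled_loss(loss)

grads = tape.gradient(scaled_loss, model.trainable_variables)
# Undo the scaling before the update so the effective step size is unaffected.
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, model.trainable_variables))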
@@ -626,11 +616,11 @@ class SimCLRFinetuneTask(base_task.Task):
       num_classes = self.task_config.model.supervised_head.num_classes
       labels = tf.one_hot(labels, num_classes)

     outputs = self.inference_step(features,
                                   model)[simclr_model.SUPERVISED_OUTPUT_KEY]
     outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
     loss = self.build_losses(
         model_outputs=outputs, labels=labels, aux_losses=model.losses)

     logs = {self.loss: loss}
     if metrics:
official/vision/beta/projects/simclr/train.py
@@ -12,22 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""TensorFlow Model Garden Vision SimCLR training driver."""
+"""TensorFlow Model Garden Vision SimCLR trainer."""
 from absl import app
 from absl import flags
 import gin
official/vision/beta/projects/yolo/configs/darknet_classification.py
@@ -15,9 +15,8 @@
 # Lint as: python3
 """Image classification with darknet configs."""
-from typing import List, Optional
 import dataclasses
+from typing import List, Optional
 from official.core import config_definitions as cfg
 from official.core import exp_factory
official/vision/beta/projects/yolo/modeling/yolo_model.py
 from official.core import registry  # pylint: disable=unused-import
 # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Yolo models."""
 import tensorflow as tf
 import tensorflow.keras as ks
 from typing import *
 from official.vision.beta.projects.yolo.modeling.backbones.darknet import \
     Darknet
 from official.vision.beta.projects.yolo.modeling.decoders.yolo_decoder import \
     YoloDecoder
 from official.vision.beta.projects.yolo.modeling.heads.yolo_head import YoloHead

-# static base Yolo Models that do not require configuration
+# Static base Yolo Models that do not require configuration
 # similar to a backbone model id.
 # this is done greatly simplify the model config
 # the structure is as follows. model version, {v3, v4, v#, ... etc}
 # the model config type {regular, tiny, small, large, ... etc}
 YOLO_MODELS = {
     "v4":
         dict(
             regular=dict(
                 embed_spp=False,
                 use_fpn=True,
                 max_level_process_len=None,
                 path_process_len=6),
             tiny=dict(
                 embed_spp=False,
                 use_fpn=False,
                 max_level_process_len=2,
                 path_process_len=1),
             csp=dict(
                 embed_spp=False,
                 use_fpn=True,
                 max_level_process_len=None,
                 csp_stack=5,
                 fpn_depth=5,
                 path_process_len=6),
             csp_large=dict(
                 embed_spp=False,
                 use_fpn=True,
                 max_level_process_len=None,
                 csp_stack=7,
                 fpn_depth=7,
                 path_process_len=8,
                 fpn_filter_scale=2),
         ),
     "v3":
         dict(
             regular=dict(
                 embed_spp=False,
                 use_fpn=False,
                 max_level_process_len=None,
                 path_process_len=6),
             tiny=dict(
                 embed_spp=False,
                 use_fpn=False,
                 max_level_process_len=2,
                 path_process_len=1),
             spp=dict(
                 embed_spp=True,
                 use_fpn=False,
                 max_level_process_len=2,
                 path_process_len=1),
         ),
 }
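As the comments above describe, this table is indexed first by model version and then by config flavor, so choosing a base model reduces to a nested dictionary lookup plus per-experiment overrides. A small illustration using a trimmed stand-in for the table (independent sketch; the real builder lives elsewhere in the project):

# Trimmed stand-in for the YOLO_MODELS table above; only two entries are kept.
YOLO_MODELS = {
    "v4": dict(tiny=dict(embed_spp=False, use_fpn=False,
                         max_level_process_len=2, path_process_len=1)),
    "v3": dict(regular=dict(embed_spp=False, use_fpn=False,
                            max_level_process_len=None, path_process_len=6)),
}

version, flavor = "v4", "tiny"
base_config = dict(YOLO_MODELS[version][flavor])  # static base config
base_config.update(path_process_len=3)            # hypothetical per-experiment override
print(base_config)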
-class Yolo(ks.Model):
+class Yolo(tf.keras.Model):
   """The YOLO model class."""

   def __init__(self,
                backbone=None,
                decoder=None,
                head=None,
-               filter=None,
+               detection_generator=None,
                **kwargs):
     """Detection initialization function.

     Args:
       backbone: `tf.keras.Model`, a backbone network.
       decoder: `tf.keras.Model`, a decoder network.
@@ -83,10 +94,10 @@ class Yolo(ks.Model):
     super().__init__(**kwargs)
     self._config_dict = {
-        'backbone': backbone,
-        'decoder': decoder,
-        'head': head,
-        'detection_generator': detection_generator
+        "backbone": backbone,
+        "decoder": decoder,
+        "head": head,
+        "detection_generator": detection_generator
     }
     # model components
official/vision/beta/projects/yolo/ops/box_ops.py
 # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Yolo box ops."""
+import math
+
 import tensorflow as tf

 from official.vision.beta.projects.yolo.ops import math_ops
-import math


 def yxyx_to_xcycwh(box: tf.Tensor):
-  """Converts boxes from ymin, xmin, ymax, xmax to x_center, y_center, width,
-  height.
+  """Converts boxes from yxyx to x_center, y_center, width, height.

   Args:
     box: any `Tensor` whose last dimension is 4 representing the coordinates of
@@ -26,9 +40,7 @@ def yxyx_to_xcycwh(box: tf.Tensor):
 @tf.custom_gradient
 def _xcycwh_to_yxyx(box: tf.Tensor, scale):
-  """Private function called by xcycwh_to_yxyx to allow custom gradients
-  with defaults.
-  """
+  """Private function to allow custom gradients with defaults."""
   with tf.name_scope('xcycwh_to_yxyx'):
     xy, wh = tf.split(box, 2, axis=-1)
     xy_min = xy - wh / 2
@@ -38,7 +50,7 @@ def _xcycwh_to_yxyx(box: tf.Tensor, scale):
     box = tf.concat([y_min, x_min, y_max, x_max], axis=-1)

   def delta(dbox):
-    #y_min = top, x_min = left, y_max = bottom, x_max = right
+    # y_min = top, x_min = left, y_max = bottom, x_max = right
     dt, dl, db, dr = tf.split(dbox, 4, axis=-1)
     dx = dl + dr
     dy = dt + db
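The two conversions in this file are symmetric: yxyx-to-center averages opposite corners for the center and differences them for width/height, while center-to-yxyx offsets the center by half the size in each direction. A plain-TensorFlow sketch of that arithmetic (an independent illustration, without the project's scaling and custom-gradient handling):

import tensorflow as tf

def xcycwh_to_yxyx(box):
  # box[..., :2] is the (x, y) center, box[..., 2:] is (w, h).
  xy, wh = tf.split(box, 2, axis=-1)
  xy_min = xy - wh / 2.0
  xy_max = xy + wh / 2.0
  # Reorder (x, y) pairs into (ymin, xmin, ymax, xmax).
  return tf.concat([xy_min[..., 1:], xy_min[..., :1],
                    xy_max[..., 1:], xy_max[..., :1]], axis=-1)

def yxyx_to_xcycwh(box):
  ymin, xmin, ymax, xmax = tf.split(box, 4, axis=-1)
  xy = tf.concat([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0], axis=-1)
  wh = tf.concat([xmax - xmin, ymax - ymin], axis=-1)
  return tf.concat([xy, wh], axis=-1)

box = tf.constant([[0.5, 0.5, 0.2, 0.4]])            # centered box, w=0.2, h=0.4
yxyx = xcycwh_to_yxyx(box)                           # [[0.3, 0.4, 0.7, 0.6]]
tf.debugging.assert_near(yxyx_to_xcycwh(yxyx), box)  # round-trips exactly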
@@ -52,12 +64,12 @@ def _xcycwh_to_yxyx(box: tf.Tensor, scale):
 def xcycwh_to_yxyx(box: tf.Tensor, darknet=False):
-  """Converts boxes from x_center, y_center, width, height to ymin, xmin, ymax,
-  xmax.
+  """Converts boxes from x_center, y_center, width, height to yxyx format.

   Args:
     box: any `Tensor` whose last dimension is 4 representing the coordinates of
       boxes in x_center, y_center, width, height.
+    darknet: `bool`, if True a scale of 1.0 is used.

   Returns:
     box: a `Tensor` whose shape is the same as `box` in new format.
@@ -105,8 +117,7 @@ def intersect_and_union(box1, box2, yxyx=False):
 def smallest_encompassing_box(box1, box2, yxyx=False):
-  """Calculates the smallest box that encompasses both that encomapasses both
-  box1 and box2.
+  """Calculates the smallest box that encompasses box1 and box2.

   Args:
     box1: any `Tensor` whose last dimension is 4 representing the coordinates of
@@ -194,7 +205,7 @@ def compute_giou(box1, box2, yxyx=False, darknet=False):
   boxc = smallest_encompassing_box(box1, box2, yxyx=yxyx)
   if yxyx:
     boxc = yxyx_to_xcycwh(boxc)
-  cxcy, cwch = tf.split(boxc, 2, axis=-1)
+  _, cwch = tf.split(boxc, 2, axis=-1)
   c = tf.math.reduce_prod(cwch, axis=-1)

   # compute giou
@@ -239,9 +250,9 @@ def compute_diou(box1, box2, beta=1.0, yxyx=False, darknet=False):
     box1 = yxyx_to_xcycwh(box1)
     box2 = yxyx_to_xcycwh(box2)

-  b1xy, b1wh = tf.split(box1, 2, axis=-1)
-  b2xy, b2wh = tf.split(box2, 2, axis=-1)
-  bcxy, bcwh = tf.split(boxc, 2, axis=-1)
+  b1xy, _ = tf.split(box1, 2, axis=-1)
+  b2xy, _ = tf.split(box2, 2, axis=-1)
+  _, bcwh = tf.split(boxc, 2, axis=-1)
   center_dist = tf.reduce_sum((b1xy - b2xy)**2, axis=-1)
   c_diag = tf.reduce_sum(bcwh**2, axis=-1)
@@ -276,8 +287,8 @@ def compute_ciou(box1, box2, yxyx=False, darknet=False):
     box1 = yxyx_to_xcycwh(box1)
     box2 = yxyx_to_xcycwh(box2)

-  b1x, b1y, b1w, b1h = tf.split(box1, 4, axis=-1)
-  b2x, b2y, b2w, b2h = tf.split(box1, 4, axis=-1)
+  _, _, b1w, b1h = tf.split(box1, 4, axis=-1)
+  _, _, b2w, b2h = tf.split(box1, 4, axis=-1)

   # computer aspect ratio consistency
   terma = tf.cast(math_ops.divide_no_nan(b1w, b1h), tf.float32)
@@ -292,13 +303,13 @@ def compute_ciou(box1, box2, yxyx=False, darknet=False):
   return iou, ciou


-# equal to bbox_overlap but far more versitile
 def aggregated_comparitive_iou(boxes1,
                                boxes2=None,
                                iou_type=0,
                                beta=0.6):
-  """Calculates the intersection over union between every box in boxes1 and
-  every box in boxes2.
+  """Calculates the IOU between two set of boxes.
+
+  Similar to bbox_overlap but far more versitile.

   Args:
     boxes1: a `Tensor` of shape [batch size, N, 4] representing the coordinates
@@ -322,11 +333,11 @@ def aggregated_comparitive_iou(boxes1,
   else:
     boxes2 = tf.transpose(boxes1, perm=(0, 2, 1, 3))

-  if iou_type == 0: #diou
+  if iou_type == 0:  # diou
     _, iou = compute_diou(boxes1, boxes2, beta=beta, yxyx=True)
-  elif iou_type == 1: #giou
+  elif iou_type == 1:  # giou
     _, iou = compute_giou(boxes1, boxes2, yxyx=True)
-  elif iou_type == 2: #ciou
+  elif iou_type == 2:  # ciou
     _, iou = compute_ciou(boxes1, boxes2, yxyx=True)
   else:
     iou = compute_iou(boxes1, boxes2, yxyx=True)
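For reference, the GIoU variant dispatched above extends plain IoU with a penalty based on the smallest box enclosing both inputs. A compact standalone sketch for boxes in (ymin, xmin, ymax, xmax) format (an independent illustration, not the project's `compute_giou`):

import tensorflow as tf

def giou_yxyx(box1, box2):
  ymin1, xmin1, ymax1, xmax1 = tf.unstack(box1, axis=-1)
  ymin2, xmin2, ymax2, xmax2 = tf.unstack(box2, axis=-1)

  # Intersection and union.
  ih = tf.maximum(tf.minimum(ymax1, ymax2) - tf.maximum(ymin1, ymin2), 0.0)
  iw = tf.maximum(tf.minimum(xmax1, xmax2) - tf.maximum(xmin1, xmin2), 0.0)
  intersection = ih * iw
  area1 = (ymax1 - ymin1) * (xmax1 - xmin1)
  area2 = (ymax2 - ymin2) * (xmax2 - xmin2)
  union = area1 + area2 - intersection
  iou = intersection / union

  # Smallest enclosing box; GIoU subtracts the fraction of it not covered by the union.
  ch = tf.maximum(ymax1, ymax2) - tf.minimum(ymin1, ymin2)
  cw = tf.maximum(xmax1, xmax2) - tf.minimum(xmin1, xmin2)
  c = ch * cw
  return iou - (c - union) / c

print(giou_yxyx(tf.constant([0., 0., 1., 1.]),
                tf.constant([0., 0.5, 1., 1.5])).numpy())  # ~0.333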
official/vision/beta/projects/yolo/ops/box_ops_test.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """box_ops tests."""
+from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
official/vision/beta/projects/yolo/ops/math_ops.py (file mode changed 100755 → 100644)
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 """A set of private math operations used to safely implement the YOLO loss."""
 import tensorflow as tf
@@ -9,7 +23,7 @@ def rm_nan_inf(x, val=0.0):
     x: any `Tensor` of any type.
     val: value to replace nan and infinity with.

-  Return:
+  Returns:
     a `Tensor` with nan and infinity removed.
   """
   cond = tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x))
@@ -25,7 +39,7 @@ def rm_nan(x, val=0.0):
     x: any `Tensor` of any type.
     val: value to replace nan.

-  Return:
+  Returns:
     a `Tensor` with nan removed.
   """
   cond = tf.math.is_nan(x)
@@ -41,7 +55,7 @@ def divide_no_nan(a, b):
     a: any `Tensor` of any type.
     b: any `Tensor` of any type with the same shape as tensor a.

-  Return:
+  Returns:
     a `Tensor` representing a divided by b, with all nan values removed.
   """
   zero = tf.cast(0.0, b.dtype)
@@ -49,7 +63,9 @@
 def mul_no_nan(x, y):
-  """Nan safe multiply operation built to allow model compilation in tflite and
+  """Nan safe multiply operation.
+
+  Built to allow model compilation in tflite and
   to allow one tensor to mask another. Where ever x is zero the
   multiplication is not computed and the value is replaced with a zero. This is
   required because 0 * nan = nan. This can make computation unstable in some
@@ -59,7 +75,7 @@ def mul_no_nan(x, y):
     x: any `Tensor` of any type.
     y: any `Tensor` of any type with the same shape as tensor x.

-  Return:
+  Returns:
     a `Tensor` representing x times y, where x is used to safely mask the
     tensor y.
   """
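The reworked `mul_no_nan` docstring hinges on one fact: `0 * nan` is `nan`, so masking a NaN-carrying tensor by multiplying with zeros does not remove the NaNs; the mask has to gate the result instead. A small standalone demonstration of the difference (plain TensorFlow, not the project's implementation):

import tensorflow as tf

mask = tf.constant([1.0, 0.0, 1.0])
values = tf.constant([2.0, float('nan'), 3.0])

naive = mask * values  # [2., nan, 3.] -- the NaN leaks through the zero mask
safe = tf.where(mask == 0.0, tf.zeros_like(values), mask * values)  # [2., 0., 3.]

print(naive.numpy(), safe.numpy())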
official/vision/beta/projects/yolo/ops/preprocess_ops_test.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """preprocess_ops tests."""
+from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
official/vision/beta/serving/detection.py
@@ -36,8 +36,6 @@ class DetectionModule(export_base.ExportModule):
     if self._batch_size is None:
       raise ValueError('batch_size cannot be None for detection models.')
-    if not self.params.task.model.detection_generator.use_batched_nms:
-      raise ValueError('Only batched_nms is supported.')
     input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
                                             self._input_image_size + [3])
official/vision/beta/serving/detection_test.py
@@ -125,13 +125,6 @@ class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):
       detection.DetectionModule(
           params, batch_size=None, input_image_size=[640, 640])

-  def test_build_model_fail_with_batched_nms_false(self):
-    params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
-    params.task.model.detection_generator.use_batched_nms = False
-    with self.assertRaisesRegex(ValueError, 'Only batched_nms is supported.'):
-      detection.DetectionModule(
-          params, batch_size=1, input_image_size=[640, 640])
-

 if __name__ == '__main__':
   tf.test.main()