ModelZoo / ResNet50_tensorflow / Commits
"...csrc/cpu/git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "c5914452ef3629acbe0b9a72ff6847581bb89020"
Commit 65bd772d authored Jun 29, 2020 by Kaushik Shivakumar

style

Parent: 8e3eb8d4
Showing 2 changed files with 35 additions and 33 deletions.
research/object_detection/meta_architectures/context_rcnn_lib.py (+28 -26)
research/object_detection/meta_architectures/context_rcnn_meta_arch.py (+7 -7)
research/object_detection/meta_architectures/context_rcnn_lib.py
@@ -19,7 +19,6 @@ from __future__ import division
 from __future__ import print_function

 import tensorflow.compat.v1 as tf
-import tf_slim as slim

 # The negative value used in padding the invalid weights.
 _NEGATIVE_PADDING_VALUE = -100000
@@ -30,23 +29,25 @@ QUERY_NAME = 'query'
 FEATURE_NAME = 'feature'


 class ContextProjection(tf.keras.layers.Layer):
   """Custom layer to do batch normalization and projection."""

   def __init__(self, projection_dimension, freeze_batchnorm, **kwargs):
-    self.batch_norm = tf.keras.layers.BatchNormalization(epsilon=0.001,
-                                                         center=True,
-                                                         scale=True,
-                                                         momentum=0.97,
-                                                         trainable=(not freeze_batchnorm))
+    self.batch_norm = tf.keras.layers.BatchNormalization(
+        epsilon=0.001,
+        center=True,
+        scale=True,
+        momentum=0.97,
+        trainable=(not freeze_batchnorm))
     self.projection = tf.keras.layers.Dense(units=projection_dimension,
                                             activation=tf.nn.relu6,
                                             use_bias=True)
     super(ContextProjection, self).__init__(**kwargs)

   def build(self, input_shape):
     self.batch_norm.build(input_shape)
     self.projection.build(input_shape)

-  def call(self, input):
-    return self.projection(self.batch_norm(input))
+  def call(self, input_features, is_training=False):
+    return self.projection(self.batch_norm(input_features, is_training))


 def filter_weight_value(weights, values, valid_mask):
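The new call signature threads is_training through to the batch normalization sublayer. A minimal sketch of exercising the restyled layer, assuming it is imported from context_rcnn_lib; the shapes and dimensions below are illustrative, not from the commit:

import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import context_rcnn_lib

# Hypothetical projection: 256-d inputs down to a 128-d bottleneck.
projection = context_rcnn_lib.ContextProjection(projection_dimension=128,
                                                freeze_batchnorm=False)
# Placeholder input of shape [batch, num_elements, channels].
features = tf.ones([2, 100, 256])
# Keras forwards the extra kwarg to call(), so batch norm sees is_training.
projected = projection(features, is_training=True)  # -> [2, 100, 128]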
@@ -119,7 +120,8 @@ def compute_valid_mask(num_valid_elements, num_elements):
   valid_mask = tf.less(batch_element_idxs, num_valid_elements)
   return valid_mask


-def project_features(features, projection_dimension, is_training, node, normalize=True):
+def project_features(features, projection_dimension, is_training, node,
+                     normalize=True):
   """Projects features to another feature space.

   Args:
@@ -127,7 +129,8 @@ def project_features(features, projection_dimension, is_training, node, normaliz
       num_features].
     projection_dimension: A int32 Tensor.
     is_training: A boolean Tensor (affecting batch normalization).
-    node: Contains two layers (Batch Normalization and Dense) specific to the particular operation being performed (key, value, query, features)
+    node: Contains a custom layer specific to the particular operation
+      being performed (key, value, query, features)
     normalize: A boolean Tensor. If true, the output features will be l2
       normalized on the last dimension.
@@ -135,12 +138,10 @@ def project_features(features, projection_dimension, is_training, node, normaliz
     A float Tensor of shape [batch, features_size, projection_dimension].
   """
   shape_arr = features.shape
-  batch_size = shape_arr[0]
-  feature_size = shape_arr[1]
-  num_features = shape_arr[2]
+  batch_size, _, num_features = shape_arr
   features = tf.reshape(features, [-1, num_features])
-  projected_features = node(features)
+  projected_features = node(features, is_training)
   projected_features = tf.reshape(projected_features,
                                   [batch_size, -1, projection_dimension])
@@ -181,10 +182,10 @@ def attention_block(input_features, context_features, bottleneck_dimension,
       input_features, bottleneck_dimension, is_training,
       attention_projections[QUERY_NAME], normalize=True)
   keys = project_features(
       context_features, bottleneck_dimension, is_training,
       attention_projections[KEY_NAME], normalize=True)
   values = project_features(
       context_features, bottleneck_dimension, is_training,
       attention_projections[VALUE_NAME], normalize=True)
   weights = tf.matmul(queries, keys, transpose_b=True)
@@ -195,14 +196,14 @@ def attention_block(input_features, context_features, bottleneck_dimension,
   features = tf.matmul(weights, values)
   output_features = project_features(
       features, output_dimension, is_training,
       attention_projections[FEATURE_NAME], normalize=False)
   return output_features


 def compute_box_context_attention(box_features, context_features,
                                   valid_context_size, bottleneck_dimension,
                                   attention_temperature, is_training,
                                   freeze_batchnorm, attention_projections):
   """Computes the attention feature from the context given a batch of box.
@@ -219,8 +220,8 @@ def compute_box_context_attention(box_features, context_features,
       softmax for weights calculation. The formula for calculation as follows:
         weights = exp(weights / temperature) / sum(exp(weights / temperature))
     is_training: A boolean Tensor (affecting batch normalization).
-    freeze_batchnorm: A boolean indicating whether to freeze batch normalization weights.
-    attention_projections: Contains a dictionary of the batch norm and projection functions.
+    freeze_batchnorm: Whether to freeze batch normalization weights.
+    attention_projections: Dictionary of the projection layers.

   Returns:
     A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels].
@@ -231,8 +232,9 @@ def compute_box_context_attention(box_features, context_features,
   channels = box_features.shape[-1]
   if 'feature' not in attention_projections:
     attention_projections[FEATURE_NAME] = ContextProjection(channels, freeze_batchnorm)
   # Average pools over height and width dimension so that the shape of
   # box_features becomes [batch_size, max_num_proposals, channels].
   box_features = tf.reduce_mean(box_features, [2, 3])
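The hunks above leave the attention math unchanged while threading is_training and the projection layers through it: project queries, keys, and values into a bottleneck space, score with a dot product, apply the temperature softmax given in the docstring, then re-weight the values. A self-contained sketch of that pattern, with plain l2 normalization standing in for project_features (an assumption for illustration, not the library code):

import tensorflow.compat.v1 as tf

def toy_attention_block(input_features, context_features, temperature):
  # input_features: [batch, n, d]; context_features: [batch, m, d].
  # l2 normalization stands in for project_features(..., normalize=True).
  queries = tf.nn.l2_normalize(input_features, axis=-1)
  keys = tf.nn.l2_normalize(context_features, axis=-1)
  values = tf.nn.l2_normalize(context_features, axis=-1)
  # Dot-product scores, as in attention_block above: [batch, n, m].
  weights = tf.matmul(queries, keys, transpose_b=True)
  # weights = exp(weights / temperature) / sum(exp(weights / temperature))
  weights = tf.nn.softmax(weights / temperature)
  # Re-weight the values: [batch, n, d].
  return tf.matmul(weights, values)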
research/object_detection/meta_architectures/context_rcnn_meta_arch.py
@@ -272,12 +272,12 @@ class ContextRCNNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
         is_training=is_training,
         freeze_batchnorm=freeze_batchnorm)
-    self._attention_projections = {'key': context_rcnn_lib.ContextProjection(
+    self._atten_projs = {'key': context_rcnn_lib.ContextProjection(
         attention_bottleneck_dimension, freeze_batchnorm),
         'val': context_rcnn_lib.ContextProjection(
         attention_bottleneck_dimension, freeze_batchnorm),
         'query': context_rcnn_lib.ContextProjection(
         attention_bottleneck_dimension, freeze_batchnorm)}

   @staticmethod
   def get_side_inputs(features):
@@ -340,7 +340,7 @@ class ContextRCNNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
         box_features=box_features,
         context_features=context_features,
         valid_context_size=valid_context_size,
-        attention_projections=self._attention_projections)
+        attention_projections=self._atten_projs)
     # Adds box features with attention features.
     box_features += attention_features
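For reference, a sketch of the pattern after the rename, assuming the library's name constants match the 'key', 'val', and 'query' strings used above; the bottleneck dimension and temperature are hypothetical values:

from object_detection.meta_architectures import context_rcnn_lib

# One ContextProjection per attention role; context_rcnn_lib adds the
# 'feature' projection lazily when it is first needed.
atten_projs = {
    name: context_rcnn_lib.ContextProjection(128, freeze_batchnorm=False)
    for name in ('key', 'val', 'query')
}
# Handed to the attention computation, mirroring the call site above:
# attention_features = context_rcnn_lib.compute_box_context_attention(
#     box_features, context_features, valid_context_size,
#     bottleneck_dimension=128, attention_temperature=0.2,
#     is_training=True, freeze_batchnorm=False,
#     attention_projections=atten_projs)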