ModelZoo / ResNet50_tensorflow · Commits

Commit 8b641b13 (unverified)
Authored Mar 26, 2022 by Srihari Humbarwadi; committed by GitHub on Mar 26, 2022.

    Merge branch 'tensorflow:master' into panoptic-deeplab

Parents: 7cffacfe, 357fa547
Changes: this page shows 20 changed files, with 82 additions and 120 deletions (+82, −120); the merge as a whole touches 411+ files. Across the files shown, the merge (a) moves NoOpActivation and the _quantize_wrapped_layer factory out of nn_layers.py into the shared quantization helper module, (b) drops the deprecated Beta>* custom-layer registrations from the QAT schemes, and (c) deletes stale "# Lint as: python3" directives.
Files changed:

official/projects/qat/vision/modeling/layers/nn_layers.py         +41  −68
official/projects/qat/vision/n_bit/__init__.py                     +0   −1
official/projects/qat/vision/n_bit/nn_blocks_test.py               +0   −1
official/projects/qat/vision/n_bit/schemes.py                      +1  −17
official/projects/qat/vision/quantization/__init__.py              +0   −1
official/projects/qat/vision/quantization/helper.py               +36   −0
official/projects/qat/vision/quantization/schemes.py               +2  −18
official/projects/qat/vision/tasks/__init__.py                     +0   −1
official/projects/qat/vision/tasks/image_classification.py         +0   −1
official/projects/qat/vision/tasks/image_classification_test.py    +1   −1
official/projects/qat/vision/tasks/retinanet_test.py               +1   −2
official/projects/s3d/configs/s3d.py                               +0   −1
official/projects/s3d/modeling/inception_utils.py                  +0   −1
official/projects/s3d/modeling/inception_utils_test.py             +0   −1
official/projects/s3d/modeling/net_utils.py                        +0   −1
official/projects/s3d/modeling/net_utils_test.py                   +0   −1
official/projects/s3d/modeling/s3d.py                              +0   −1
official/projects/s3d/modeling/s3d_test.py                         +0   −1
official/projects/s3d/train.py                                     +0   −1
official/projects/teams/teams_experiments.py                       +0   −1
official/projects/qat/vision/modeling/layers/nn_layers.py  (+41, −68)

@@ -14,7 +14,7 @@
 """Contains common building blocks for neural networks."""
-from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
+from typing import Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union

 import tensorflow as tf

@@ -31,36 +31,6 @@ States = Dict[str, tf.Tensor]
 Activation = Union[str, Callable]


-class NoOpActivation:
-  """No-op activation which simply returns the incoming tensor.
-
-  This activation is required to distinguish between `keras.activations.linear`
-  which does the same thing. The main difference is that NoOpActivation should
-  not have any quantize operation applied to it.
-  """
-
-  def __call__(self, x: tf.Tensor) -> tf.Tensor:
-    return x
-
-  def get_config(self) -> Dict[str, Any]:
-    """Get a config of this object."""
-    return {}
-
-  def __eq__(self, other: Any) -> bool:
-    return isinstance(other, NoOpActivation)
-
-  def __ne__(self, other: Any) -> bool:
-    return not self.__eq__(other)
-
-
-def _quantize_wrapped_layer(cls, quantize_config):
-  def constructor(*arg, **kwargs):
-    return tfmot.quantization.keras.QuantizeWrapperV2(
-        cls(*arg, **kwargs), quantize_config)
-  return constructor
-
-
 @tf.keras.utils.register_keras_serializable(package='Vision')
 class SqueezeExcitationQuantized(
     helper.LayerQuantizerHelper,

@@ -154,14 +124,13 @@ class SqueezeExcitationQuantized(
     return x

   def build(self, input_shape):
-    conv2d_quantized = _quantize_wrapped_layer(
+    conv2d_quantized = helper.quantize_wrapped_layer(
         tf.keras.layers.Conv2D,
         configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
-    conv2d_quantized_output_quantized = _quantize_wrapped_layer(
+    conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
         tf.keras.layers.Conv2D,
         configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
     num_reduced_filters = nn_layers.make_divisible(
         max(1, int(self._in_filters * self._se_ratio)),
         divisor=self._divisible_by,

@@ -176,7 +145,7 @@ class SqueezeExcitationQuantized(
         kernel_initializer=self._kernel_initializer,
         kernel_regularizer=self._kernel_regularizer,
         bias_regularizer=self._bias_regularizer,
-        activation=NoOpActivation())
+        activation=helper.NoOpActivation())
     self._se_expand = conv2d_quantized_output_quantized(
         filters=self._out_filters,

@@ -187,7 +156,7 @@ class SqueezeExcitationQuantized(
         kernel_initializer=self._kernel_initializer,
         kernel_regularizer=self._kernel_regularizer,
         bias_regularizer=self._bias_regularizer,
-        activation=NoOpActivation())
+        activation=helper.NoOpActivation())
     self._multiply = tfmot.quantization.keras.QuantizeWrapperV2(
         tf.keras.layers.Multiply(),

@@ -342,14 +311,14 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
     backbone_shape = input_shape[0]
     use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
     random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
-    conv2d_quantized = _quantize_wrapped_layer(
+    conv2d_quantized = helper.quantize_wrapped_layer(
         tf.keras.layers.Conv2D,
         configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
-    conv2d_quantized_output_quantized = _quantize_wrapped_layer(
+    conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
         tf.keras.layers.Conv2D,
         configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
-    depthwise_conv2d_quantized = _quantize_wrapped_layer(
+    depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
         tf.keras.layers.DepthwiseConv2D,
         configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'], False))

@@ -365,11 +334,13 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
     norm_layer = (
         tf.keras.layers.experimental.SyncBatchNormalization
         if self._config_dict['use_sync_bn']
         else tf.keras.layers.BatchNormalization)
-    norm_with_quantize = _quantize_wrapped_layer(
+    norm_with_quantize = helper.quantize_wrapped_layer(
         norm_layer, configs.Default8BitOutputQuantizeConfig())
-    norm = norm_with_quantize if self._config_dict['activation'] not in [
-        'relu', 'relu6'] else _quantize_wrapped_layer(
-            norm_layer, configs.NoOpQuantizeConfig())
+    if self._config_dict['activation'] not in ['relu', 'relu6']:
+      norm = norm_with_quantize
+    else:
+      norm = helper.quantize_wrapped_layer(norm_layer,
+                                           configs.NoOpQuantizeConfig())

     bn_kwargs = {
         'axis': self._bn_axis,

@@ -387,7 +358,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
         kernel_regularizer=self._config_dict['kernel_regularizer'],
         name='segmentation_head_deeplabv3p_fusion_conv',
         filters=self._config_dict['low_level_num_filters'],
-        activation=NoOpActivation())
+        activation=helper.NoOpActivation())
     self._dlv3p_norm = norm(
         name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)

@@ -406,7 +377,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
             depthwise_initializer=random_initializer,
             depthwise_regularizer=self._config_dict['kernel_regularizer'],
             depth_multiplier=1,
-            activation=NoOpActivation()))
+            activation=helper.NoOpActivation()))
         norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
         self._norms.append(norm(name=norm_name, **bn_kwargs))
       conv_name = 'segmentation_head_conv_{}'.format(i)

@@ -414,7 +385,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
           conv2d_quantized(
               name=conv_name,
               filters=self._config_dict['num_filters'],
-              activation=NoOpActivation(),
+              activation=helper.NoOpActivation(),
               **conv_kwargs))
       norm_name = 'segmentation_head_norm_{}'.format(i)
       self._norms.append(norm(name=norm_name, **bn_kwargs))

@@ -428,9 +399,9 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
         kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
         kernel_regularizer=self._config_dict['kernel_regularizer'],
         bias_regularizer=self._config_dict['bias_regularizer'],
-        activation=NoOpActivation())
-    upsampling = _quantize_wrapped_layer(
+        activation=helper.NoOpActivation())
+    upsampling = helper.quantize_wrapped_layer(
         tf.keras.layers.UpSampling2D,
         configs.Default8BitQuantizeConfig([], [], True))
     self._upsampling_layer = upsampling(

@@ -440,7 +411,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
     self._resizing_layer = tf.keras.layers.Resizing(
         backbone_shape[1], backbone_shape[2], interpolation='bilinear')
-    concat = _quantize_wrapped_layer(
+    concat = helper.quantize_wrapped_layer(
         tf.keras.layers.Concatenate,
         configs.Default8BitQuantizeConfig([], [], True))
     self._concat_layer = concat(axis=self._bn_axis)

@@ -589,17 +560,19 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
     norm_layer = (
         tf.keras.layers.experimental.SyncBatchNormalization
         if self._use_sync_bn else tf.keras.layers.BatchNormalization)
-    norm_with_quantize = _quantize_wrapped_layer(
+    norm_with_quantize = helper.quantize_wrapped_layer(
         norm_layer, configs.Default8BitOutputQuantizeConfig())
-    norm = norm_with_quantize if self._activation not in [
-        'relu', 'relu6'] else _quantize_wrapped_layer(
-            norm_layer, configs.NoOpQuantizeConfig())
+    if self._activation not in ['relu', 'relu6']:
+      norm = norm_with_quantize
+    else:
+      norm = helper.quantize_wrapped_layer(norm_layer,
+                                           configs.NoOpQuantizeConfig())

-    conv2d_quantized = _quantize_wrapped_layer(
+    conv2d_quantized = helper.quantize_wrapped_layer(
         tf.keras.layers.Conv2D,
         configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
-    depthwise_conv2d_quantized_output_quantized = _quantize_wrapped_layer(
+    depthwise_conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
         tf.keras.layers.DepthwiseConv2D,
         configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'], True))

@@ -612,7 +585,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
         kernel_initializer=self._kernel_initializer,
         kernel_regularizer=self._kernel_regularizer,
         use_bias=False,
-        activation=NoOpActivation())
+        activation=helper.NoOpActivation())
     norm1 = norm(
         axis=self._bn_axis,
         momentum=self._batchnorm_momentum,

@@ -633,7 +606,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
             depthwise_initializer=self._kernel_initializer,
             dilation_rate=dilation_rate,
             use_bias=False,
-            activation=NoOpActivation())
+            activation=helper.NoOpActivation())
       ]
       kernel_size = (1, 1)
     conv_dilation = leading_layers + [

@@ -645,7 +618,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
             kernel_initializer=self._kernel_initializer,
             dilation_rate=dilation_rate,
             use_bias=False,
-            activation=NoOpActivation())
+            activation=helper.NoOpActivation())
     ]
     norm_dilation = norm(
         axis=self._bn_axis,

@@ -656,16 +629,16 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
     if self._pool_kernel_size is None:
       pooling = [
-          _quantize_wrapped_layer(
+          helper.quantize_wrapped_layer(
               tf.keras.layers.GlobalAveragePooling2D,
               configs.Default8BitQuantizeConfig([], [], True))(),
-          _quantize_wrapped_layer(
+          helper.quantize_wrapped_layer(
               tf.keras.layers.Reshape,
               configs.Default8BitQuantizeConfig([], [], True))((1, 1, channels))
       ]
     else:
       pooling = [
-          _quantize_wrapped_layer(
+          helper.quantize_wrapped_layer(
               tf.keras.layers.AveragePooling2D,
               configs.Default8BitQuantizeConfig([], [], True))(self._pool_kernel_size)

@@ -677,7 +650,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
         kernel_initializer=self._kernel_initializer,
         kernel_regularizer=self._kernel_regularizer,
         use_bias=False,
-        activation=NoOpActivation())
+        activation=helper.NoOpActivation())
     norm2 = norm(
         axis=self._bn_axis,
         momentum=self._batchnorm_momentum,

@@ -685,7 +658,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
     self.aspp_layers.append(pooling + [conv2, norm2])
-    resizing = _quantize_wrapped_layer(
+    resizing = helper.quantize_wrapped_layer(
         tf.keras.layers.Resizing,
         configs.Default8BitQuantizeConfig([], [], True))
     self._resizing_layer = resizing(

@@ -698,14 +671,14 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
             kernel_initializer=self._kernel_initializer,
             kernel_regularizer=self._kernel_regularizer,
             use_bias=False,
-            activation=NoOpActivation()),
+            activation=helper.NoOpActivation()),
         norm_with_quantize(
             axis=self._bn_axis,
             momentum=self._batchnorm_momentum,
             epsilon=self._batchnorm_epsilon)
     ]
     self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
-    concat = _quantize_wrapped_layer(
+    concat = helper.quantize_wrapped_layer(
         tf.keras.layers.Concatenate,
         configs.Default8BitQuantizeConfig([], [], True))
     self._concat_layer = concat(axis=-1)
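Note: the recurring pattern in this file is a small factory that builds a Keras layer and immediately wraps it in tfmot's QuantizeWrapperV2, so each wrapped layer carries its own quantization recipe; the change above simply relocates that factory to helper.quantize_wrapped_layer. Below is a minimal, self-contained sketch of the pattern. The factory is copied from the diff itself; OutputOnlyQuantizeConfig is a hypothetical stand-in written against tfmot's public QuantizeConfig API, not the Model Garden configs module used above.

import tensorflow as tf
import tensorflow_model_optimization as tfmot


class OutputOnlyQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
  """Stand-in config: fake-quantizes only the layer output."""

  def get_weights_and_quantizers(self, layer):
    return []  # leave weights untouched in this sketch

  def get_activations_and_quantizers(self, layer):
    return []

  def set_quantize_weights(self, layer, quantize_weights):
    pass

  def set_quantize_activations(self, layer, quantize_activations):
    pass

  def get_output_quantizers(self, layer):
    # 8-bit moving-average quantizer on the layer output.
    return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
        num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]

  def get_config(self):
    return {}


def quantize_wrapped_layer(cls, quantize_config):
  """Same factory as in the diff: returns a constructor for wrapped layers."""
  def constructor(*args, **kwargs):
    return tfmot.quantization.keras.QuantizeWrapperV2(
        cls(*args, **kwargs), quantize_config)
  return constructor


# Usage: a Conv2D whose output is fake-quantized during training.
conv2d_quantized = quantize_wrapped_layer(
    tf.keras.layers.Conv2D, OutputOnlyQuantizeConfig())
layer = conv2d_quantized(filters=8, kernel_size=3, padding='same')
print(layer(tf.zeros([1, 32, 32, 3])).shape)  # (1, 32, 32, 8)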
official/projects/qat/vision/n_bit/__init__.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Configs package definition."""
 from official.projects.qat.vision.n_bit import configs
official/projects/qat/vision/n_bit/nn_blocks_test.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Tests for nn_blocks."""
 from typing import Any, Iterable, Tuple
official/projects/qat/vision/n_bit/schemes.py  (+1, −17)

@@ -199,23 +199,7 @@ class QuantizeLayoutTransform(
         CustomLayerQuantize(
             'Vision>Conv2DBNBlock',
             nn_blocks.Conv2DBNBlockNBitQuantized,
             num_bits_weight=self._num_bits_weight,
-            num_bits_activation=self._num_bits_activation),
-        # TODO(yeqing): Remove the `Beta` custom layers.
-        CustomLayerQuantize(
-            'Beta>BottleneckBlock',
-            nn_blocks.BottleneckBlockNBitQuantized,
-            num_bits_weight=self._num_bits_weight,
-            num_bits_activation=self._num_bits_activation),
-        CustomLayerQuantize(
-            'Beta>InvertedBottleneckBlock',
-            nn_blocks.InvertedBottleneckBlockNBitQuantized,
-            num_bits_weight=self._num_bits_weight,
-            num_bits_activation=self._num_bits_activation),
-        CustomLayerQuantize(
-            'Beta>Conv2DBNBlock',
-            nn_blocks.Conv2DBNBlockNBitQuantized,
-            num_bits_weight=self._num_bits_weight,
-            num_bits_activation=self._num_bits_activation),
+            num_bits_activation=self._num_bits_activation)
     ]
     return _ModelTransformer(
         model, transforms, set(layer_quantize_map.keys()),
         layer_quantize_map).transform()
official/projects/qat/vision/quantization/__init__.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Configs package definition."""
 from official.projects.qat.vision.quantization import configs
official/projects/qat/vision/quantization/helper.py  (+36, −0)

@@ -13,7 +13,9 @@
 # limitations under the License.

 """Quantization helpers."""

+from typing import Any, Dict
+
 import tensorflow as tf
 import tensorflow_model_optimization as tfmot

@@ -47,3 +49,37 @@ class LayerQuantizerHelper(object):
     for name in self._quantizers:
       self._quantizer_vars[name] = self._quantizers[name].build(
           tensor_shape=None, name=name, layer=self)
+
+
+class NoOpActivation:
+  """No-op activation which simply returns the incoming tensor.
+
+  This activation is required to distinguish between `keras.activations.linear`
+  which does the same thing. The main difference is that NoOpActivation should
+  not have any quantize operation applied to it.
+  """
+
+  def __call__(self, x: tf.Tensor) -> tf.Tensor:
+    return x
+
+  def get_config(self) -> Dict[str, Any]:
+    """Get a config of this object."""
+    return {}
+
+  def __eq__(self, other: Any) -> bool:
+    if not other or not isinstance(other, NoOpActivation):
+      return False
+    return True
+
+  def __ne__(self, other: Any) -> bool:
+    return not self.__eq__(other)
+
+
+def quantize_wrapped_layer(cls, quantize_config):
+  def constructor(*arg, **kwargs):
+    return tfmot.quantization.keras.QuantizeWrapperV2(
+        cls(*arg, **kwargs), quantize_config)
+  return constructor
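Note: the point of NoOpActivation is subtle enough to deserve a usage sketch. Functionally it is the identity, exactly like keras.activations.linear, but it is a distinct type with value equality, so the QAT graph transforms can recognize it and skip inserting a fake-quant op after the layer. A small illustration of those semantics, assuming the Model Garden package is importable (hypothetical usage, not from the diff):

import tensorflow as tf
from official.projects.qat.vision.quantization import helper

act = helper.NoOpActivation()
x = tf.constant([1.0, -2.0, 3.0])

# Identity on tensors, just like a linear activation.
assert bool(tf.reduce_all(act(x) == x))

# Value equality: any two instances compare equal, so pattern matching on
# layer configs can detect "no quantization wanted here".
assert act == helper.NoOpActivation()

# But it is not keras.activations.linear, which is the whole point.
assert act != tf.keras.activations.linear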
official/projects/qat/vision/quantization/schemes.py  (+2, −18)

@@ -102,10 +102,7 @@ class CustomLayerQuantize(
       if bottleneck_layer['class_name'] in [
           'Vision>Conv2DBNBlock',
           'Vision>InvertedBottleneckBlock',
           'Vision>SegmentationHead',
           'Vision>SpatialPyramidPooling',
-          'Vision>ASPP',
-          # TODO(yeqing): Removes the Beta layers.
-          'Beta>Conv2DBNBlock', 'Beta>InvertedBottleneckBlock',
-          'Beta>SegmentationHead', 'Beta>SpatialPyramidPooling', 'Beta>ASPP'
+          'Vision>ASPP'
       ]:
         layer_metadata = {'quantize_config': configs.NoOpQuantizeConfig()}
       else:

@@ -170,20 +167,7 @@ class QuantizeLayoutTransform(
             quantized_nn_layers.SegmentationHeadQuantized),
         CustomLayerQuantize(
             'Vision>SpatialPyramidPooling',
             quantized_nn_layers.SpatialPyramidPoolingQuantized),
-        CustomLayerQuantize(
-            'Vision>ASPP', quantized_nn_layers.ASPPQuantized),
-        # TODO(yeqing): Remove the `Beta` components.
-        CustomLayerQuantize(
-            'Beta>BottleneckBlock',
-            quantized_nn_blocks.BottleneckBlockQuantized),
-        CustomLayerQuantize(
-            'Beta>InvertedBottleneckBlock',
-            quantized_nn_blocks.InvertedBottleneckBlockQuantized),
-        CustomLayerQuantize(
-            'Beta>Conv2DBNBlock',
-            quantized_nn_blocks.Conv2DBNBlockQuantized),
-        CustomLayerQuantize(
-            'Beta>SegmentationHead',
-            quantized_nn_layers.SegmentationHeadQuantized),
-        CustomLayerQuantize(
-            'Beta>SpatialPyramidPooling',
-            quantized_nn_layers.SpatialPyramidPoolingQuantized),
-        CustomLayerQuantize(
-            'Beta>ASPP', quantized_nn_layers.ASPPQuantized)
+        CustomLayerQuantize(
+            'Vision>ASPP', quantized_nn_layers.ASPPQuantized)
     ]
     return tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer(
         model, transforms,
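Note: these CustomLayerQuantize transforms run as part of a tfmot quantize scheme when a model is converted for QAT; the layout transform swaps each registered Vision block for its *Quantized twin. A rough sketch of where the scheme plugs in is below. Hedged assumptions: the Default8BitQuantizeScheme class name is inferred from the surrounding Model Garden code rather than shown in this diff, and passing a scheme to quantize_apply requires tensorflow-model-optimization >= 0.7.

import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.quantization import schemes


def quantize_model(model: tf.keras.Model) -> tf.keras.Model:
  # Annotate every layer for quantization, then let the custom scheme's
  # QuantizeLayoutTransform rewrite the graph with quantized Vision blocks.
  annotated = tfmot.quantization.keras.quantize_annotate_model(model)
  return tfmot.quantization.keras.quantize_apply(
      annotated, scheme=schemes.Default8BitQuantizeScheme())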
official/projects/qat/vision/tasks/__init__.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Tasks package definition."""
 from official.projects.qat.vision.tasks import image_classification
official/projects/qat/vision/tasks/image_classification.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Image classification task definition."""
 import tensorflow as tf
official/projects/qat/vision/tasks/image_classification_test.py  (+1, −1)

@@ -19,10 +19,10 @@ from absl.testing import parameterized
 import orbit
 import tensorflow as tf

+from official import vision
 from official.core import exp_factory
 from official.modeling import optimization
 from official.projects.qat.vision.tasks import image_classification as img_cls_task
-from official.vision import beta


 class ImageClassificationTaskTest(tf.test.TestCase, parameterized.TestCase):
official/projects/qat/vision/tasks/retinanet_test.py  (+1, −2)

@@ -12,17 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Tests for RetinaNet task."""
 # pylint: disable=unused-import
 from absl.testing import parameterized
 import orbit
 import tensorflow as tf

+from official import vision
 from official.core import exp_factory
 from official.modeling import optimization
 from official.projects.qat.vision.tasks import retinanet
-from official.vision import beta
 from official.vision.configs import retinanet as exp_cfg
official/projects/s3d/configs/s3d.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """S3D model configurations."""
 import dataclasses
 from typing import Text
official/projects/s3d/modeling/inception_utils.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Contains modules related to Inception networks."""
 from typing import Callable, Dict, Optional, Sequence, Set, Text, Tuple, Type, Union
official/projects/s3d/modeling/inception_utils_test.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 from absl.testing import parameterized
 import tensorflow as tf
official/projects/s3d/modeling/net_utils.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Commonly used TensorFlow 2 network blocks."""
 from typing import Any, Text, Sequence, Union
official/projects/s3d/modeling/net_utils_test.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 from absl import logging
 from absl.testing import parameterized
official/projects/s3d/modeling/s3d.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Contains the Tensorflow 2 version definition of S3D model.

 S3D model is described in the following paper:
official/projects/s3d/modeling/s3d_test.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Tests for S3D model."""
 from absl.testing import parameterized
official/projects/s3d/train.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """TensorFlow Model Garden Vision training driver for S3D."""
 from absl import app
official/projects/teams/teams_experiments.py  (+0, −1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 # pylint: disable=g-doc-return-or-yield,line-too-long
 """TEAMS experiments."""
 import dataclasses