Unverified Commit 8b641b13 authored by Srihari Humbarwadi, committed by GitHub

Merge branch 'tensorflow:master' into panoptic-deeplab

parents 7cffacfe 357fa547
......@@ -14,7 +14,7 @@
"""Contains common building blocks for neural networks."""
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from typing import Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import tensorflow as tf
......@@ -31,36 +31,6 @@ States = Dict[str, tf.Tensor]
Activation = Union[str, Callable]
class NoOpActivation:
"""No-op activation which simply returns the incoming tensor.
This activation is required to distinguish it from `keras.activations.linear`,
which does the same thing. The main difference is that NoOpActivation should
not have any quantize operation applied to it.
"""
def __call__(self, x: tf.Tensor) -> tf.Tensor:
return x
def get_config(self) -> Dict[str, Any]:
"""Get a config of this object."""
return {}
def __eq__(self, other: Any) -> bool:
return isinstance(other, NoOpActivation)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def _quantize_wrapped_layer(cls, quantize_config):
def constructor(*arg, **kwargs):
return tfmot.quantization.keras.QuantizeWrapperV2(
cls(*arg, **kwargs),
quantize_config)
return constructor
@tf.keras.utils.register_keras_serializable(package='Vision')
class SqueezeExcitationQuantized(
helper.LayerQuantizerHelper,
......@@ -154,14 +124,13 @@ class SqueezeExcitationQuantized(
return x
def build(self, input_shape):
conv2d_quantized = _quantize_wrapped_layer(
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(
['kernel'], ['activation'], False))
conv2d_quantized_output_quantized = _quantize_wrapped_layer(
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(
['kernel'], ['activation'], True))
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
num_reduced_filters = nn_layers.make_divisible(
max(1, int(self._in_filters * self._se_ratio)),
divisor=self._divisible_by,
......@@ -176,7 +145,7 @@ class SqueezeExcitationQuantized(
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
activation=helper.NoOpActivation())
self._se_expand = conv2d_quantized_output_quantized(
filters=self._out_filters,
......@@ -187,7 +156,7 @@ class SqueezeExcitationQuantized(
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
activation=helper.NoOpActivation())
self._multiply = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Multiply(),
......@@ -342,14 +311,14 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
backbone_shape = input_shape[0]
use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
conv2d_quantized = _quantize_wrapped_layer(
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
conv2d_quantized_output_quantized = _quantize_wrapped_layer(
conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
depthwise_conv2d_quantized = _quantize_wrapped_layer(
depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
['activation'], False))
......@@ -365,11 +334,13 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn'] else
tf.keras.layers.BatchNormalization)
norm_with_quantize = _quantize_wrapped_layer(
norm_with_quantize = helper.quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
norm = norm_with_quantize if self._config_dict['activation'] not in [
'relu', 'relu6'
] else _quantize_wrapped_layer(norm_layer, configs.NoOpQuantizeConfig())
if self._config_dict['activation'] not in ['relu', 'relu6']:
norm = norm_with_quantize
else:
norm = helper.quantize_wrapped_layer(norm_layer,
configs.NoOpQuantizeConfig())
bn_kwargs = {
'axis': self._bn_axis,
......@@ -387,7 +358,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
kernel_regularizer=self._config_dict['kernel_regularizer'],
name='segmentation_head_deeplabv3p_fusion_conv',
filters=self._config_dict['low_level_num_filters'],
activation=NoOpActivation())
activation=helper.NoOpActivation())
self._dlv3p_norm = norm(
name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)
......@@ -406,7 +377,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
depthwise_initializer=random_initializer,
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1,
activation=NoOpActivation()))
activation=helper.NoOpActivation()))
norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
self._norms.append(norm(name=norm_name, **bn_kwargs))
conv_name = 'segmentation_head_conv_{}'.format(i)
......@@ -414,7 +385,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
conv2d_quantized(
name=conv_name,
filters=self._config_dict['num_filters'],
activation=NoOpActivation(),
activation=helper.NoOpActivation(),
**conv_kwargs))
norm_name = 'segmentation_head_norm_{}'.format(i)
self._norms.append(norm(name=norm_name, **bn_kwargs))
......@@ -428,9 +399,9 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
activation=NoOpActivation())
activation=helper.NoOpActivation())
upsampling = _quantize_wrapped_layer(
upsampling = helper.quantize_wrapped_layer(
tf.keras.layers.UpSampling2D,
configs.Default8BitQuantizeConfig([], [], True))
self._upsampling_layer = upsampling(
......@@ -440,7 +411,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
self._resizing_layer = tf.keras.layers.Resizing(
backbone_shape[1], backbone_shape[2], interpolation='bilinear')
concat = _quantize_wrapped_layer(
concat = helper.quantize_wrapped_layer(
tf.keras.layers.Concatenate,
configs.Default8BitQuantizeConfig([], [], True))
self._concat_layer = concat(axis=self._bn_axis)
......@@ -589,17 +560,19 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._use_sync_bn else tf.keras.layers.BatchNormalization)
norm_with_quantize = _quantize_wrapped_layer(
norm_with_quantize = helper.quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
norm = norm_with_quantize if self._activation not in [
'relu', 'relu6'
] else _quantize_wrapped_layer(norm_layer, configs.NoOpQuantizeConfig())
if self._activation not in ['relu', 'relu6']:
norm = norm_with_quantize
else:
norm = helper.quantize_wrapped_layer(norm_layer,
configs.NoOpQuantizeConfig())
conv2d_quantized = _quantize_wrapped_layer(
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
depthwise_conv2d_quantized_output_quantized = _quantize_wrapped_layer(
depthwise_conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
['activation'], True))
......@@ -612,7 +585,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
activation=NoOpActivation())
activation=helper.NoOpActivation())
norm1 = norm(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
......@@ -633,7 +606,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
depthwise_initializer=self._kernel_initializer,
dilation_rate=dilation_rate,
use_bias=False,
activation=NoOpActivation())
activation=helper.NoOpActivation())
]
kernel_size = (1, 1)
conv_dilation = leading_layers + [
......@@ -645,7 +618,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
kernel_initializer=self._kernel_initializer,
dilation_rate=dilation_rate,
use_bias=False,
activation=NoOpActivation())
activation=helper.NoOpActivation())
]
norm_dilation = norm(
axis=self._bn_axis,
......@@ -656,16 +629,16 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
if self._pool_kernel_size is None:
pooling = [
_quantize_wrapped_layer(
helper.quantize_wrapped_layer(
tf.keras.layers.GlobalAveragePooling2D,
configs.Default8BitQuantizeConfig([], [], True))(),
_quantize_wrapped_layer(
helper.quantize_wrapped_layer(
tf.keras.layers.Reshape,
configs.Default8BitQuantizeConfig([], [], True))((1, 1, channels))
]
else:
pooling = [
_quantize_wrapped_layer(
helper.quantize_wrapped_layer(
tf.keras.layers.AveragePooling2D,
configs.Default8BitQuantizeConfig([], [],
True))(self._pool_kernel_size)
......@@ -677,7 +650,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
activation=NoOpActivation())
activation=helper.NoOpActivation())
norm2 = norm(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
......@@ -685,7 +658,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
self.aspp_layers.append(pooling + [conv2, norm2])
resizing = _quantize_wrapped_layer(
resizing = helper.quantize_wrapped_layer(
tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [],
True))
self._resizing_layer = resizing(
......@@ -698,14 +671,14 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
activation=NoOpActivation()),
activation=helper.NoOpActivation()),
norm_with_quantize(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
]
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
concat = _quantize_wrapped_layer(
concat = helper.quantize_wrapped_layer(
tf.keras.layers.Concatenate,
configs.Default8BitQuantizeConfig([], [], True))
self._concat_layer = concat(axis=-1)
......
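Every hunk in the file above follows the same pattern: inner layers are created through `helper.quantize_wrapped_layer` and given a `helper.NoOpActivation()` so that no activation quantizer is attached to them. Below is a minimal sketch of that pattern, assuming the `official.projects.qat.vision.quantization` package added later in this commit is importable; the filter and kernel sizes are illustrative only.

```python
import tensorflow as tf

from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper

# The factory returns a constructor; calling that constructor yields a Conv2D
# already wrapped in tfmot's QuantizeWrapperV2 with the given quantize config
# (here: quantize the kernel, leave the layer output unquantized).
conv2d_quantized = helper.quantize_wrapped_layer(
    tf.keras.layers.Conv2D,
    configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))

# The quantized blocks call such constructors inside build(); NoOpActivation
# keeps the activation slot filled without marking it for quantization.
se_reduce = conv2d_quantized(
    filters=8,
    kernel_size=1,
    use_bias=True,
    activation=helper.NoOpActivation())
print(type(se_reduce).__name__)  # QuantizeWrapperV2
```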
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Configs package definition."""
from official.projects.qat.vision.n_bit import configs
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for nn_blocks."""
from typing import Any, Iterable, Tuple
......
......@@ -199,23 +199,7 @@ class QuantizeLayoutTransform(
'Vision>Conv2DBNBlock',
nn_blocks.Conv2DBNBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
# TODO(yeqing): Remove the `Beta` custom layers.
CustomLayerQuantize(
'Beta>BottleneckBlock',
nn_blocks.BottleneckBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
CustomLayerQuantize(
'Beta>InvertedBottleneckBlock',
nn_blocks.InvertedBottleneckBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
CustomLayerQuantize(
'Beta>Conv2DBNBlock',
nn_blocks.Conv2DBNBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
num_bits_activation=self._num_bits_activation)
]
return _ModelTransformer(model, transforms, set(layer_quantize_map.keys()),
layer_quantize_map).transform()
......
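The string keys that `CustomLayerQuantize` matches ('Vision>Conv2DBNBlock' and so on) are Keras registered names of the form `package>ClassName`, produced by the `register_keras_serializable(package='Vision')` decorator seen earlier in this diff. The `Beta>` entries removed above correspond to the retired `official.vision.beta` registrations (note the `from official.vision import beta` imports dropped elsewhere in this commit) and no longer match any layer. A short sketch of where those keys come from; `DemoBlock` is a hypothetical layer used only for illustration:

```python
import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Vision')
class DemoBlock(tf.keras.layers.Layer):
  """Hypothetical layer, only to show how registered names are formed."""

  def call(self, inputs):
    return inputs


# The registered name is what appears as `class_name` in a serialized model
# config, and it is the key CustomLayerQuantize compares against.
print(tf.keras.utils.get_registered_name(DemoBlock))  # Vision>DemoBlock
```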
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Configs package definition."""
from official.projects.qat.vision.quantization import configs
......
......@@ -13,7 +13,9 @@
# limitations under the License.
"""Quantization helpers."""
from typing import Any, Dict
import tensorflow as tf
import tensorflow_model_optimization as tfmot
......@@ -47,3 +49,37 @@ class LayerQuantizerHelper(object):
for name in self._quantizers:
self._quantizer_vars[name] = self._quantizers[name].build(
tensor_shape=None, name=name, layer=self)
class NoOpActivation:
"""No-op activation which simply returns the incoming tensor.
This activation is required to distinguish it from `keras.activations.linear`,
which does the same thing. The main difference is that NoOpActivation should
not have any quantize operation applied to it.
"""
def __call__(self, x: tf.Tensor) -> tf.Tensor:
return x
def get_config(self) -> Dict[str, Any]:
"""Get a config of this object."""
return {}
def __eq__(self, other: Any) -> bool:
if not other or not isinstance(other, NoOpActivation):
return False
return True
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def quantize_wrapped_layer(cls, quantize_config):
def constructor(*arg, **kwargs):
return tfmot.quantization.keras.QuantizeWrapperV2(
cls(*arg, **kwargs), quantize_config)
return constructor
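To make the intent of `NoOpActivation` concrete, here is a small example of its behaviour, assuming the helper module above is importable: it acts as the identity, all instances compare equal, and, unlike passing `activation='linear'`, its distinct type lets the quantize configs recognize it and skip adding an activation quantizer.

```python
import tensorflow as tf

from official.projects.qat.vision.quantization import helper

act = helper.NoOpActivation()

# Identity behaviour: the tensor passes through unchanged.
x = tf.constant([1.0, -2.0, 3.0])
assert bool(tf.reduce_all(act(x) == x))

# All instances compare equal, so layer configs containing NoOpActivation
# compare cleanly...
assert act == helper.NoOpActivation()

# ...while remaining distinguishable from the functionally identical
# `linear` activation, which would receive an output quantizer.
assert act != tf.keras.activations.linear
```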
......@@ -102,10 +102,7 @@ class CustomLayerQuantize(
if bottleneck_layer['class_name'] in [
'Vision>Conv2DBNBlock', 'Vision>InvertedBottleneckBlock',
'Vision>SegmentationHead', 'Vision>SpatialPyramidPooling',
'Vision>ASPP',
# TODO(yeqing): Removes the Beta layers.
'Beta>Conv2DBNBlock', 'Beta>InvertedBottleneckBlock',
'Beta>SegmentationHead', 'Beta>SpatialPyramidPooling', 'Beta>ASPP'
'Vision>ASPP'
]:
layer_metadata = {'quantize_config': configs.NoOpQuantizeConfig()}
else:
......@@ -170,20 +167,7 @@ class QuantizeLayoutTransform(
quantized_nn_layers.SegmentationHeadQuantized),
CustomLayerQuantize('Vision>SpatialPyramidPooling',
quantized_nn_layers.SpatialPyramidPoolingQuantized),
CustomLayerQuantize('Vision>ASPP', quantized_nn_layers.ASPPQuantized),
# TODO(yeqing): Remove the `Beta` components.
CustomLayerQuantize('Beta>BottleneckBlock',
quantized_nn_blocks.BottleneckBlockQuantized),
CustomLayerQuantize(
'Beta>InvertedBottleneckBlock',
quantized_nn_blocks.InvertedBottleneckBlockQuantized),
CustomLayerQuantize('Beta>Conv2DBNBlock',
quantized_nn_blocks.Conv2DBNBlockQuantized),
CustomLayerQuantize('Beta>SegmentationHead',
quantized_nn_layers.SegmentationHeadQuantized),
CustomLayerQuantize('Beta>SpatialPyramidPooling',
quantized_nn_layers.SpatialPyramidPoolingQuantized),
CustomLayerQuantize('Beta>ASPP', quantized_nn_layers.ASPPQuantized)
CustomLayerQuantize('Vision>ASPP', quantized_nn_layers.ASPPQuantized)
]
return tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer(
model, transforms,
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tasks package definition."""
from official.projects.qat.vision.tasks import image_classification
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Image classification task definition."""
import tensorflow as tf
......
......@@ -19,10 +19,10 @@ from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.qat.vision.tasks import image_classification as img_cls_task
from official.vision import beta
class ImageClassificationTaskTest(tf.test.TestCase, parameterized.TestCase):
......
......@@ -12,17 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for RetinaNet task."""
# pylint: disable=unused-import
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.qat.vision.tasks import retinanet
from official.vision import beta
from official.vision.configs import retinanet as exp_cfg
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""S3D model configurations."""
import dataclasses
from typing import Text
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Contains modules related to Inception networks."""
from typing import Callable, Dict, Optional, Sequence, Set, Text, Tuple, Type, Union
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from absl.testing import parameterized
import tensorflow as tf
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Commonly used TensorFlow 2 network blocks."""
from typing import Any, Text, Sequence, Union
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from absl import logging
from absl.testing import parameterized
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Contains the Tensorflow 2 version definition of S3D model.
S3D model is described in the following paper:
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for S3D model."""
from absl.testing import parameterized
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TensorFlow Model Garden Vision training driver for S3D."""
from absl import app
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=g-doc-return-or-yield,line-too-long
"""TEAMS experiments."""
import dataclasses
......