Commit 50369291 authored by Fan Yang, committed by A. Unique TensorFlower

Internal change.

PiperOrigin-RevId: 437372581
parent 0876884a
@@ -98,19 +98,13 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = helper.quantize_wrapped_layer(
tf.keras.layers.experimental.SyncBatchNormalization,
configs.NoOpQuantizeConfig())
self._norm_with_quantize = helper.quantize_wrapped_layer(
tf.keras.layers.experimental.SyncBatchNormalization,
configs.Default8BitOutputQuantizeConfig())
else:
self._norm = helper.quantize_wrapped_layer(
tf.keras.layers.BatchNormalization, configs.NoOpQuantizeConfig())
self._norm_with_quantize = helper.quantize_wrapped_layer(
tf.keras.layers.BatchNormalization,
configs.Default8BitOutputQuantizeConfig())
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if use_sync_bn else tf.keras.layers.BatchNormalization)
self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
self._norm = helper.BatchNormalizationNoQuantized(norm_layer)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
@@ -119,15 +113,11 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
if self._use_projection:
if self._resnetd_shortcut:
self._shortcut0 = tf.keras.layers.AveragePooling2D(
pool_size=2, strides=self._strides, padding='same')
self._shortcut1 = conv2d_quantized(
self._shortcut1 = helper.Conv2DQuantized(
filters=self._filters * 4,
kernel_size=1,
strides=1,
@@ -137,7 +127,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
else:
self._shortcut = conv2d_quantized(
self._shortcut = helper.Conv2DQuantized(
filters=self._filters * 4,
kernel_size=1,
strides=self._strides,
@@ -153,7 +143,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._conv1 = conv2d_quantized(
self._conv1 = helper.Conv2DQuantized(
filters=self._filters,
kernel_size=1,
strides=1,
@@ -171,7 +161,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
self._conv2 = conv2d_quantized(
self._conv2 = helper.Conv2DQuantized(
filters=self._filters,
kernel_size=3,
strides=self._strides,
@@ -191,7 +181,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
self._conv3 = conv2d_quantized(
self._conv3 = helper.Conv2DQuantized(
filters=self._filters * 4,
kernel_size=1,
strides=1,
@@ -359,10 +349,8 @@ class Conv2DBNBlockQuantized(tf.keras.layers.Layer):
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if use_sync_bn else tf.keras.layers.BatchNormalization)
self._norm_with_quantize = helper.quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
self._norm = helper.quantize_wrapped_layer(norm_layer,
configs.NoOpQuantizeConfig())
self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
self._norm = helper.BatchNormalizationNoQuantized(norm_layer)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
@@ -389,20 +377,15 @@ class Conv2DBNBlockQuantized(tf.keras.layers.Layer):
base_config = super(Conv2DBNBlockQuantized, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _norm_by_activation(self, activation):
if activation in ['relu', 'relu6']:
return self._norm
return self._norm_with_quantize
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
if self._use_explicit_padding and self._kernel_size > 1:
padding_size = nn_layers.get_padding_for_kernel_size(self._kernel_size)
self._pad = tf.keras.layers.ZeroPadding2D(padding_size)
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
not self._use_normalization))
conv2d_quantized = (
helper.Conv2DQuantized
if self._use_normalization else helper.Conv2DOutputQuantized)
self._conv0 = conv2d_quantized(
filters=self._filters,
kernel_size=self._kernel_size,
@@ -414,14 +397,15 @@ class Conv2DBNBlockQuantized(tf.keras.layers.Layer):
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
if self._use_normalization:
self._norm0 = self._norm_by_activation(self._activation)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._norm0 = helper.norm_by_activation(self._activation,
self._norm_with_quantize,
self._norm)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
super(Conv2DBNBlockQuantized, self).build(input_shape)
def call(
@@ -546,10 +530,8 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if use_sync_bn else tf.keras.layers.BatchNormalization)
self._norm_with_quantize = helper.quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
self._norm = helper.quantize_wrapped_layer(norm_layer,
configs.NoOpQuantizeConfig())
self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
self._norm = helper.BatchNormalizationNoQuantized(norm_layer)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
@@ -562,21 +544,8 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
else:
self._depthsize_regularizer = None
def _norm_by_activation(self, activation):
if activation in ['relu', 'relu6']:
return self._norm
return self._norm_with_quantize
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
['activation'], False))
expand_filters = self._in_filters
if self._expand_ratio > 1:
# First 1x1 conv for channel expansion.
@@ -586,7 +555,7 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
expand_kernel = 1 if self._use_depthwise else self._kernel_size
expand_stride = 1 if self._use_depthwise else self._strides
self._conv0 = conv2d_quantized(
self._conv0 = helper.Conv2DQuantized(
filters=expand_filters,
kernel_size=expand_kernel,
strides=expand_stride,
@@ -596,17 +565,18 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm0 = self._norm_by_activation(self._activation)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._norm0 = helper.norm_by_activation(self._activation,
self._norm_with_quantize,
self._norm)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
if self._use_depthwise:
# Depthwise conv.
self._conv1 = depthwise_conv2d_quantized(
self._conv1 = helper.DepthwiseConv2DQuantized(
kernel_size=(self._kernel_size, self._kernel_size),
strides=self._strides,
padding='same',
@@ -617,10 +587,12 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
depthwise_regularizer=self._depthsize_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm1 = self._norm_by_activation(self._depthwise_activation)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._norm1 = helper.norm_by_activation(self._depthwise_activation,
self._norm_with_quantize,
self._norm)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._depthwise_activation_layer = (
tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._depthwise_activation,
@@ -648,7 +620,7 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
self._squeeze_excitation = None
# Last 1x1 conv.
self._conv2 = conv2d_quantized(
self._conv2 = helper.Conv2DQuantized(
filters=self._out_filters,
kernel_size=1,
strides=1,
@@ -124,19 +124,12 @@ class SqueezeExcitationQuantized(
return x
def build(self, input_shape):
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
num_reduced_filters = nn_layers.make_divisible(
max(1, int(self._in_filters * self._se_ratio)),
divisor=self._divisible_by,
round_down_protect=self._round_down_protect)
self._se_reduce = conv2d_quantized(
self._se_reduce = helper.Conv2DQuantized(
filters=num_reduced_filters,
kernel_size=1,
strides=1,
@@ -147,7 +140,7 @@ class SqueezeExcitationQuantized(
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._se_expand = conv2d_quantized_output_quantized(
self._se_expand = helper.Conv2DOutputQuantized(
filters=self._out_filters,
kernel_size=1,
strides=1,
@@ -311,17 +304,6 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
backbone_shape = input_shape[0]
use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
['activation'], False))
conv_kwargs = {
'kernel_size': 3 if not use_depthwise_convolution else 1,
'padding': 'same',
@@ -334,13 +316,10 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn'] else
tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
if self._config_dict['activation'] not in ['relu', 'relu6']:
norm = norm_with_quantize
else:
norm = helper.quantize_wrapped_layer(norm_layer,
configs.NoOpQuantizeConfig())
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
norm = helper.norm_by_activation(self._config_dict['activation'],
norm_with_quantize, norm_no_quantize)
bn_kwargs = {
'axis': self._bn_axis,
@@ -350,7 +329,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
if self._config_dict['feature_fusion'] == 'deeplabv3plus':
# Deeplabv3+ feature fusion layers.
self._dlv3p_conv = conv2d_quantized(
self._dlv3p_conv = helper.Conv2DQuantized(
kernel_size=1,
padding='same',
use_bias=False,
@@ -369,7 +348,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
for i in range(self._config_dict['num_convs']):
if use_depthwise_convolution:
self._convs.append(
depthwise_conv2d_quantized(
helper.DepthwiseConv2DQuantized(
name='segmentation_head_depthwise_conv_{}'.format(i),
kernel_size=3,
padding='same',
@@ -382,7 +361,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
self._norms.append(norm(name=norm_name, **bn_kwargs))
conv_name = 'segmentation_head_conv_{}'.format(i)
self._convs.append(
conv2d_quantized(
helper.Conv2DQuantized(
name=conv_name,
filters=self._config_dict['num_filters'],
activation=helper.NoOpActivation(),
@@ -390,7 +369,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
norm_name = 'segmentation_head_norm_{}'.format(i)
self._norms.append(norm(name=norm_name, **bn_kwargs))
self._classifier = conv2d_quantized_output_quantized(
self._classifier = helper.Conv2DOutputQuantized(
name='segmentation_output',
filters=self._config_dict['num_classes'],
kernel_size=self._config_dict['prediction_kernel_size'],
@@ -401,20 +380,14 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
bias_regularizer=self._config_dict['bias_regularizer'],
activation=helper.NoOpActivation())
upsampling = helper.quantize_wrapped_layer(
tf.keras.layers.UpSampling2D,
configs.Default8BitQuantizeConfig([], [], True))
self._upsampling_layer = upsampling(
self._upsampling_layer = helper.UpSampling2DQuantized(
size=(self._config_dict['upsample_factor'],
self._config_dict['upsample_factor']),
interpolation='nearest')
self._resizing_layer = tf.keras.layers.Resizing(
backbone_shape[1], backbone_shape[2], interpolation='bilinear')
concat = helper.quantize_wrapped_layer(
tf.keras.layers.Concatenate,
configs.Default8BitQuantizeConfig([], [], True))
self._concat_layer = concat(axis=self._bn_axis)
self._concat_layer = helper.ConcatenateQuantized(axis=self._bn_axis)
super().build(input_shape)
@@ -560,26 +533,14 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._use_sync_bn else tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
if self._activation not in ['relu', 'relu6']:
norm = norm_with_quantize
else:
norm = helper.quantize_wrapped_layer(norm_layer,
configs.NoOpQuantizeConfig())
conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
False))
depthwise_conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
['activation'], True))
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
norm = helper.norm_by_activation(self._activation, norm_with_quantize,
norm_no_quantize)
self.aspp_layers = []
conv1 = conv2d_quantized(
conv1 = helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=self._kernel_initializer,
@@ -598,7 +559,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
kernel_size = (3, 3)
if self._use_depthwise_convolution:
leading_layers += [
depthwise_conv2d_quantized_output_quantized(
helper.DepthwiseConv2DOutputQuantized(
depth_multiplier=1,
kernel_size=kernel_size,
padding='same',
@@ -610,7 +571,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
]
kernel_size = (1, 1)
conv_dilation = leading_layers + [
conv2d_quantized(
helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=kernel_size,
padding='same',
@@ -629,22 +590,13 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
if self._pool_kernel_size is None:
pooling = [
helper.quantize_wrapped_layer(
tf.keras.layers.GlobalAveragePooling2D,
configs.Default8BitQuantizeConfig([], [], True))(),
helper.quantize_wrapped_layer(
tf.keras.layers.Reshape,
configs.Default8BitQuantizeConfig([], [], True))((1, 1, channels))
helper.GlobalAveragePooling2DQuantized(),
helper.ReshapeQuantized((1, 1, channels))
]
else:
pooling = [
helper.quantize_wrapped_layer(
tf.keras.layers.AveragePooling2D,
configs.Default8BitQuantizeConfig([], [],
True))(self._pool_kernel_size)
]
pooling = [helper.AveragePooling2DQuantized(self._pool_kernel_size)]
conv2 = conv2d_quantized(
conv2 = helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=self._kernel_initializer,
@@ -657,15 +609,11 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
epsilon=self._batchnorm_epsilon)
self.aspp_layers.append(pooling + [conv2, norm2])
resizing = helper.quantize_wrapped_layer(
tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [],
True))
self._resizing_layer = resizing(
self._resizing_layer = helper.ResizingQuantized(
height, width, interpolation=self._interpolation)
self._projection = [
conv2d_quantized(
helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=self._kernel_initializer,
@@ -678,10 +626,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
epsilon=self._batchnorm_epsilon)
]
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
concat = helper.quantize_wrapped_layer(
tf.keras.layers.Concatenate,
configs.Default8BitQuantizeConfig([], [], True))
self._concat_layer = concat(axis=-1)
self._concat_layer = helper.ConcatenateQuantized(axis=-1)
def call(self,
inputs: tf.Tensor,
@@ -16,7 +16,9 @@
from typing import Any, Dict
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.quantization import configs
class LayerQuantizerHelper(object):
@@ -83,3 +85,48 @@ def quantize_wrapped_layer(cls, quantize_config):
cls(*arg, **kwargs), quantize_config)
return constructor
def norm_by_activation(activation, norm_quantized, norm_no_quantized):
if activation not in ['relu', 'relu6']:
return norm_quantized
else:
return norm_no_quantized
Conv2DQuantized = quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
Conv2DOutputQuantized = quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
DepthwiseConv2DQuantized = quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
False))
DepthwiseConv2DOutputQuantized = quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
True))
GlobalAveragePooling2DQuantized = quantize_wrapped_layer(
tf.keras.layers.GlobalAveragePooling2D,
configs.Default8BitQuantizeConfig([], [], True))
AveragePooling2DQuantized = quantize_wrapped_layer(
tf.keras.layers.AveragePooling2D,
configs.Default8BitQuantizeConfig([], [], True))
ResizingQuantized = quantize_wrapped_layer(
tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True))
ConcatenateQuantized = quantize_wrapped_layer(
tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [],
True))
UpSampling2DQuantized = quantize_wrapped_layer(
tf.keras.layers.UpSampling2D, configs.Default8BitQuantizeConfig([], [],
True))
ReshapeQuantized = quantize_wrapped_layer(
tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True))
# pylint:disable=g-long-lambda
BatchNormalizationQuantized = lambda norm_layer: quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
BatchNormalizationNoQuantized = lambda norm_layer: quantize_wrapped_layer(
norm_layer, configs.NoOpQuantizeConfig())
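
For reference, a minimal usage sketch (not part of the commit) of how the new helper aliases are meant to be combined; the import path is inferred from the configs import in the diff above, and the filter sizes and hyperparameters are illustrative assumptions.

# Usage sketch: assumes the TF Model Garden QAT package is on the Python path
# and that the helper module is importable as
# official.projects.qat.vision.quantization.helper (an assumption).
import tensorflow as tf

from official.projects.qat.vision.quantization import helper

# Pick plain vs. sync batch norm, then bind it with or without output
# quantization. norm_by_activation returns the non-quantized wrapper for
# 'relu'/'relu6' and the output-quantized wrapper for any other activation.
norm_layer = tf.keras.layers.BatchNormalization
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
norm = helper.norm_by_activation('relu', norm_with_quantize, norm_no_quantize)

# Quantization-aware 1x1 convolution followed by the selected normalization,
# mirroring the conv/norm pairs used in the blocks above.
conv = helper.Conv2DQuantized(
    filters=64,
    kernel_size=1,
    strides=1,
    use_bias=False,
    activation=helper.NoOpActivation())
bn = norm(axis=-1, momentum=0.99, epsilon=1e-3)

inputs = tf.keras.Input(shape=(32, 32, 16))
outputs = bn(conv(inputs))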