Commit 50369291 authored by Fan Yang, committed by A. Unique TensorFlower

Internal change.

PiperOrigin-RevId: 437372581
parent 0876884a
@@ -98,19 +98,13 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
     self._norm_epsilon = norm_epsilon
     self._kernel_regularizer = kernel_regularizer
     self._bias_regularizer = bias_regularizer
-    if use_sync_bn:
-      self._norm = helper.quantize_wrapped_layer(
-          tf.keras.layers.experimental.SyncBatchNormalization,
-          configs.NoOpQuantizeConfig())
-      self._norm_with_quantize = helper.quantize_wrapped_layer(
-          tf.keras.layers.experimental.SyncBatchNormalization,
-          configs.Default8BitOutputQuantizeConfig())
-    else:
-      self._norm = helper.quantize_wrapped_layer(
-          tf.keras.layers.BatchNormalization, configs.NoOpQuantizeConfig())
-      self._norm_with_quantize = helper.quantize_wrapped_layer(
-          tf.keras.layers.BatchNormalization,
-          configs.Default8BitOutputQuantizeConfig())
+    norm_layer = (
+        tf.keras.layers.experimental.SyncBatchNormalization
+        if use_sync_bn else tf.keras.layers.BatchNormalization)
+    self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
+    self._norm = helper.BatchNormalizationNoQuantized(norm_layer)
     if tf.keras.backend.image_data_format() == 'channels_last':
       self._bn_axis = -1
     else:
@@ -119,15 +113,11 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
   def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
     """Build variables and child layers to prepare for calling."""
-    conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
-                                              False))
     if self._use_projection:
       if self._resnetd_shortcut:
         self._shortcut0 = tf.keras.layers.AveragePooling2D(
             pool_size=2, strides=self._strides, padding='same')
-        self._shortcut1 = conv2d_quantized(
+        self._shortcut1 = helper.Conv2DQuantized(
             filters=self._filters * 4,
             kernel_size=1,
             strides=1,
@@ -137,7 +127,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
             bias_regularizer=self._bias_regularizer,
             activation=helper.NoOpActivation())
       else:
-        self._shortcut = conv2d_quantized(
+        self._shortcut = helper.Conv2DQuantized(
             filters=self._filters * 4,
             kernel_size=1,
             strides=self._strides,
@@ -153,7 +143,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
           epsilon=self._norm_epsilon,
           trainable=self._bn_trainable)

-    self._conv1 = conv2d_quantized(
+    self._conv1 = helper.Conv2DQuantized(
         filters=self._filters,
         kernel_size=1,
         strides=1,
@@ -171,7 +161,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
         tf_utils.get_activation(self._activation, use_keras_layer=True),
         configs.Default8BitActivationQuantizeConfig())

-    self._conv2 = conv2d_quantized(
+    self._conv2 = helper.Conv2DQuantized(
         filters=self._filters,
         kernel_size=3,
         strides=self._strides,
@@ -191,7 +181,7 @@ class BottleneckBlockQuantized(tf.keras.layers.Layer):
         tf_utils.get_activation(self._activation, use_keras_layer=True),
         configs.Default8BitActivationQuantizeConfig())

-    self._conv3 = conv2d_quantized(
+    self._conv3 = helper.Conv2DQuantized(
         filters=self._filters * 4,
         kernel_size=1,
         strides=1,
@@ -359,10 +349,8 @@ class Conv2DBNBlockQuantized(tf.keras.layers.Layer):
     norm_layer = (
         tf.keras.layers.experimental.SyncBatchNormalization
         if use_sync_bn else tf.keras.layers.BatchNormalization)
-    self._norm_with_quantize = helper.quantize_wrapped_layer(
-        norm_layer, configs.Default8BitOutputQuantizeConfig())
-    self._norm = helper.quantize_wrapped_layer(norm_layer,
-                                               configs.NoOpQuantizeConfig())
+    self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
+    self._norm = helper.BatchNormalizationNoQuantized(norm_layer)

     if tf.keras.backend.image_data_format() == 'channels_last':
       self._bn_axis = -1
@@ -389,20 +377,15 @@ class Conv2DBNBlockQuantized(tf.keras.layers.Layer):
     base_config = super(Conv2DBNBlockQuantized, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))

-  def _norm_by_activation(self, activation):
-    if activation in ['relu', 'relu6']:
-      return self._norm
-    return self._norm_with_quantize
-
   def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
     """Build variables and child layers to prepare for calling."""
     if self._use_explicit_padding and self._kernel_size > 1:
       padding_size = nn_layers.get_padding_for_kernel_size(self._kernel_size)
       self._pad = tf.keras.layers.ZeroPadding2D(padding_size)
-    conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
-                                              not self._use_normalization))
+    conv2d_quantized = (
+        helper.Conv2DQuantized
+        if self._use_normalization else helper.Conv2DOutputQuantized)
+
     self._conv0 = conv2d_quantized(
         filters=self._filters,
         kernel_size=self._kernel_size,
@@ -414,14 +397,15 @@ class Conv2DBNBlockQuantized(tf.keras.layers.Layer):
         bias_regularizer=self._bias_regularizer,
         activation=helper.NoOpActivation())
     if self._use_normalization:
-      self._norm0 = self._norm_by_activation(self._activation)(
+      self._norm0 = helper.norm_by_activation(self._activation,
+                                              self._norm_with_quantize,
+                                              self._norm)(
           axis=self._bn_axis,
           momentum=self._norm_momentum,
           epsilon=self._norm_epsilon)
     self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
         tf_utils.get_activation(self._activation, use_keras_layer=True),
         configs.Default8BitActivationQuantizeConfig())

     super(Conv2DBNBlockQuantized, self).build(input_shape)

   def call(
@@ -546,10 +530,8 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
     norm_layer = (
         tf.keras.layers.experimental.SyncBatchNormalization
         if use_sync_bn else tf.keras.layers.BatchNormalization)
-    self._norm_with_quantize = helper.quantize_wrapped_layer(
-        norm_layer, configs.Default8BitOutputQuantizeConfig())
-    self._norm = helper.quantize_wrapped_layer(norm_layer,
-                                               configs.NoOpQuantizeConfig())
+    self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
+    self._norm = helper.BatchNormalizationNoQuantized(norm_layer)

     if tf.keras.backend.image_data_format() == 'channels_last':
       self._bn_axis = -1
@@ -562,21 +544,8 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
     else:
       self._depthsize_regularizer = None

-  def _norm_by_activation(self, activation):
-    if activation in ['relu', 'relu6']:
-      return self._norm
-    return self._norm_with_quantize
-
   def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
     """Build variables and child layers to prepare for calling."""
-    conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
-                                              False))
-    depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.DepthwiseConv2D,
-        configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
-                                              ['activation'], False))
     expand_filters = self._in_filters
     if self._expand_ratio > 1:
       # First 1x1 conv for channel expansion.
@@ -586,7 +555,7 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
       expand_kernel = 1 if self._use_depthwise else self._kernel_size
       expand_stride = 1 if self._use_depthwise else self._strides

-      self._conv0 = conv2d_quantized(
+      self._conv0 = helper.Conv2DQuantized(
          filters=expand_filters,
          kernel_size=expand_kernel,
          strides=expand_stride,
@@ -596,17 +565,18 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=helper.NoOpActivation())
-      self._norm0 = self._norm_by_activation(self._activation)(
+      self._norm0 = helper.norm_by_activation(self._activation,
+                                              self._norm_with_quantize,
+                                              self._norm)(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)
       self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
           tf_utils.get_activation(self._activation, use_keras_layer=True),
           configs.Default8BitActivationQuantizeConfig())

     if self._use_depthwise:
       # Depthwise conv.
-      self._conv1 = depthwise_conv2d_quantized(
+      self._conv1 = helper.DepthwiseConv2DQuantized(
           kernel_size=(self._kernel_size, self._kernel_size),
           strides=self._strides,
           padding='same',
@@ -617,7 +587,9 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
          depthwise_regularizer=self._depthsize_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=helper.NoOpActivation())
-      self._norm1 = self._norm_by_activation(self._depthwise_activation)(
+      self._norm1 = helper.norm_by_activation(self._depthwise_activation,
+                                              self._norm_with_quantize,
+                                              self._norm)(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)
@@ -648,7 +620,7 @@ class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
       self._squeeze_excitation = None

     # Last 1x1 conv.
-    self._conv2 = conv2d_quantized(
+    self._conv2 = helper.Conv2DQuantized(
         filters=self._out_filters,
         kernel_size=1,
         strides=1,
......
@@ -124,19 +124,12 @@ class SqueezeExcitationQuantized(
     return x

   def build(self, input_shape):
-    conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
-                                              False))
-    conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
     num_reduced_filters = nn_layers.make_divisible(
         max(1, int(self._in_filters * self._se_ratio)),
         divisor=self._divisible_by,
         round_down_protect=self._round_down_protect)
-    self._se_reduce = conv2d_quantized(
+    self._se_reduce = helper.Conv2DQuantized(
         filters=num_reduced_filters,
         kernel_size=1,
         strides=1,
@@ -147,7 +140,7 @@ class SqueezeExcitationQuantized(
         bias_regularizer=self._bias_regularizer,
         activation=helper.NoOpActivation())

-    self._se_expand = conv2d_quantized_output_quantized(
+    self._se_expand = helper.Conv2DOutputQuantized(
         filters=self._out_filters,
         kernel_size=1,
         strides=1,
@@ -311,17 +304,6 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
     backbone_shape = input_shape[0]
     use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
     random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
-    conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
-                                              False))
-    conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
-    depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.DepthwiseConv2D,
-        configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
-                                              ['activation'], False))
     conv_kwargs = {
         'kernel_size': 3 if not use_depthwise_convolution else 1,
         'padding': 'same',
@@ -334,13 +316,10 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
         tf.keras.layers.experimental.SyncBatchNormalization
         if self._config_dict['use_sync_bn'] else
         tf.keras.layers.BatchNormalization)
-    norm_with_quantize = helper.quantize_wrapped_layer(
-        norm_layer, configs.Default8BitOutputQuantizeConfig())
-    if self._config_dict['activation'] not in ['relu', 'relu6']:
-      norm = norm_with_quantize
-    else:
-      norm = helper.quantize_wrapped_layer(norm_layer,
-                                           configs.NoOpQuantizeConfig())
+    norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
+    norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
+    norm = helper.norm_by_activation(self._config_dict['activation'],
+                                     norm_with_quantize, norm_no_quantize)

     bn_kwargs = {
         'axis': self._bn_axis,
@@ -350,7 +329,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
     if self._config_dict['feature_fusion'] == 'deeplabv3plus':
       # Deeplabv3+ feature fusion layers.
-      self._dlv3p_conv = conv2d_quantized(
+      self._dlv3p_conv = helper.Conv2DQuantized(
          kernel_size=1,
          padding='same',
          use_bias=False,
@@ -369,7 +348,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
     for i in range(self._config_dict['num_convs']):
       if use_depthwise_convolution:
         self._convs.append(
-            depthwise_conv2d_quantized(
+            helper.DepthwiseConv2DQuantized(
                 name='segmentation_head_depthwise_conv_{}'.format(i),
                 kernel_size=3,
                 padding='same',
@@ -382,7 +361,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
         self._norms.append(norm(name=norm_name, **bn_kwargs))
       conv_name = 'segmentation_head_conv_{}'.format(i)
       self._convs.append(
-          conv2d_quantized(
+          helper.Conv2DQuantized(
              name=conv_name,
              filters=self._config_dict['num_filters'],
              activation=helper.NoOpActivation(),
@@ -390,7 +369,7 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
       norm_name = 'segmentation_head_norm_{}'.format(i)
       self._norms.append(norm(name=norm_name, **bn_kwargs))

-    self._classifier = conv2d_quantized_output_quantized(
+    self._classifier = helper.Conv2DOutputQuantized(
        name='segmentation_output',
        filters=self._config_dict['num_classes'],
        kernel_size=self._config_dict['prediction_kernel_size'],
@@ -401,20 +380,14 @@ class SegmentationHeadQuantized(tf.keras.layers.Layer):
        bias_regularizer=self._config_dict['bias_regularizer'],
        activation=helper.NoOpActivation())

-    upsampling = helper.quantize_wrapped_layer(
-        tf.keras.layers.UpSampling2D,
-        configs.Default8BitQuantizeConfig([], [], True))
-    self._upsampling_layer = upsampling(
+    self._upsampling_layer = helper.UpSampling2DQuantized(
        size=(self._config_dict['upsample_factor'],
              self._config_dict['upsample_factor']),
        interpolation='nearest')
     self._resizing_layer = tf.keras.layers.Resizing(
         backbone_shape[1], backbone_shape[2], interpolation='bilinear')

-    concat = helper.quantize_wrapped_layer(
-        tf.keras.layers.Concatenate,
-        configs.Default8BitQuantizeConfig([], [], True))
-    self._concat_layer = concat(axis=self._bn_axis)
+    self._concat_layer = helper.ConcatenateQuantized(axis=self._bn_axis)

     super().build(input_shape)
@@ -560,26 +533,14 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
     norm_layer = (
         tf.keras.layers.experimental.SyncBatchNormalization
         if self._use_sync_bn else tf.keras.layers.BatchNormalization)
-    norm_with_quantize = helper.quantize_wrapped_layer(
-        norm_layer, configs.Default8BitOutputQuantizeConfig())
-    if self._activation not in ['relu', 'relu6']:
-      norm = norm_with_quantize
-    else:
-      norm = helper.quantize_wrapped_layer(norm_layer,
-                                           configs.NoOpQuantizeConfig())
-
-    conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'],
-                                              False))
-    depthwise_conv2d_quantized_output_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.DepthwiseConv2D,
-        configs.Default8BitConvQuantizeConfig(['depthwise_kernel'],
-                                              ['activation'], True))
+    norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
+    norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
+    norm = helper.norm_by_activation(self._activation, norm_with_quantize,
+                                     norm_no_quantize)

     self.aspp_layers = []

-    conv1 = conv2d_quantized(
+    conv1 = helper.Conv2DQuantized(
         filters=self._output_channels,
         kernel_size=(1, 1),
         kernel_initializer=self._kernel_initializer,
@@ -598,7 +559,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
       kernel_size = (3, 3)
       if self._use_depthwise_convolution:
         leading_layers += [
-            depthwise_conv2d_quantized_output_quantized(
+            helper.DepthwiseConv2DOutputQuantized(
                 depth_multiplier=1,
                 kernel_size=kernel_size,
                 padding='same',
@@ -610,7 +571,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
        ]
        kernel_size = (1, 1)
      conv_dilation = leading_layers + [
-          conv2d_quantized(
+          helper.Conv2DQuantized(
              filters=self._output_channels,
              kernel_size=kernel_size,
              padding='same',
@@ -629,22 +590,13 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
     if self._pool_kernel_size is None:
       pooling = [
-          helper.quantize_wrapped_layer(
-              tf.keras.layers.GlobalAveragePooling2D,
-              configs.Default8BitQuantizeConfig([], [], True))(),
-          helper.quantize_wrapped_layer(
-              tf.keras.layers.Reshape,
-              configs.Default8BitQuantizeConfig([], [], True))((1, 1, channels))
+          helper.GlobalAveragePooling2DQuantized(),
+          helper.ReshapeQuantized((1, 1, channels))
       ]
     else:
-      pooling = [
-          helper.quantize_wrapped_layer(
-              tf.keras.layers.AveragePooling2D,
-              configs.Default8BitQuantizeConfig([], [],
-                                                True))(self._pool_kernel_size)
-      ]
+      pooling = [helper.AveragePooling2DQuantized(self._pool_kernel_size)]

-    conv2 = conv2d_quantized(
+    conv2 = helper.Conv2DQuantized(
         filters=self._output_channels,
         kernel_size=(1, 1),
         kernel_initializer=self._kernel_initializer,
@@ -657,15 +609,11 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
         epsilon=self._batchnorm_epsilon)
     self.aspp_layers.append(pooling + [conv2, norm2])

-    resizing = helper.quantize_wrapped_layer(
-        tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [],
-                                                                    True))
-    self._resizing_layer = resizing(
+    self._resizing_layer = helper.ResizingQuantized(
         height, width, interpolation=self._interpolation)

     self._projection = [
-        conv2d_quantized(
+        helper.Conv2DQuantized(
            filters=self._output_channels,
            kernel_size=(1, 1),
            kernel_initializer=self._kernel_initializer,
@@ -678,10 +626,7 @@ class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
            epsilon=self._batchnorm_epsilon)
     ]
     self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
-    concat = helper.quantize_wrapped_layer(
-        tf.keras.layers.Concatenate,
-        configs.Default8BitQuantizeConfig([], [], True))
-    self._concat_layer = concat(axis=-1)
+    self._concat_layer = helper.ConcatenateQuantized(axis=-1)

   def call(self,
            inputs: tf.Tensor,
......
@@ -16,7 +16,9 @@
 from typing import Any, Dict

 import tensorflow as tf
 import tensorflow_model_optimization as tfmot

+from official.projects.qat.vision.quantization import configs
+

 class LayerQuantizerHelper(object):
@@ -83,3 +85,48 @@ def quantize_wrapped_layer(cls, quantize_config):
         cls(*arg, **kwargs), quantize_config)

   return constructor
+
+
+def norm_by_activation(activation, norm_quantized, norm_no_quantized):
+  if activation not in ['relu', 'relu6']:
+    return norm_quantized
+  else:
+    return norm_no_quantized
+
+
+Conv2DQuantized = quantize_wrapped_layer(
+    tf.keras.layers.Conv2D,
+    configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
+Conv2DOutputQuantized = quantize_wrapped_layer(
+    tf.keras.layers.Conv2D,
+    configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
+DepthwiseConv2DQuantized = quantize_wrapped_layer(
+    tf.keras.layers.DepthwiseConv2D,
+    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
+                                          False))
+DepthwiseConv2DOutputQuantized = quantize_wrapped_layer(
+    tf.keras.layers.DepthwiseConv2D,
+    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
+                                          True))
+GlobalAveragePooling2DQuantized = quantize_wrapped_layer(
+    tf.keras.layers.GlobalAveragePooling2D,
+    configs.Default8BitQuantizeConfig([], [], True))
+AveragePooling2DQuantized = quantize_wrapped_layer(
+    tf.keras.layers.AveragePooling2D,
+    configs.Default8BitQuantizeConfig([], [], True))
+ResizingQuantized = quantize_wrapped_layer(
+    tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True))
+ConcatenateQuantized = quantize_wrapped_layer(
+    tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [],
+                                                                   True))
+UpSampling2DQuantized = quantize_wrapped_layer(
+    tf.keras.layers.UpSampling2D, configs.Default8BitQuantizeConfig([], [],
+                                                                    True))
+ReshapeQuantized = quantize_wrapped_layer(
+    tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True))
+
+# pylint:disable=g-long-lambda
+BatchNormalizationQuantized = lambda norm_layer: quantize_wrapped_layer(
+    norm_layer, configs.Default8BitOutputQuantizeConfig())
+BatchNormalizationNoQuantized = lambda norm_layer: quantize_wrapped_layer(
+    norm_layer, configs.NoOpQuantizeConfig())
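
Usage note (not part of this commit): a minimal sketch of how the new helper aliases replace the per-call-site quantize_wrapped_layer boilerplate seen in the hunks above. The layer arguments below are illustrative only, and it assumes the helper module sits next to configs in official.projects.qat.vision.quantization.

# Illustrative sketch; argument values are not taken from the diff.
import tensorflow as tf

from official.projects.qat.vision.quantization import helper

# One alias per (layer, quantize config) pair; calling it returns the
# quantize-annotated Keras layer, as quantize_wrapped_layer did inline before.
conv = helper.Conv2DQuantized(
    filters=64,
    kernel_size=1,
    strides=1,
    use_bias=False,
    activation=helper.NoOpActivation())

# norm_by_activation picks the non-output-quantized norm wrapper for
# 'relu'/'relu6' and the output-quantized wrapper for any other activation.
norm_layer = tf.keras.layers.BatchNormalization
norm = helper.norm_by_activation(
    'relu',
    helper.BatchNormalizationQuantized(norm_layer),
    helper.BatchNormalizationNoQuantized(norm_layer))(
        axis=-1, momentum=0.99, epsilon=0.001)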