Commit 9ef7d76e authored by Jaehong Kim's avatar Jaehong Kim Committed by A. Unique TensorFlower
Browse files

Remove redundant fake-quants for the object detection model.

PiperOrigin-RevId: 456392981
parent d182e423
...@@ -175,6 +175,7 @@ def build_qat_retinanet( ...@@ -175,6 +175,7 @@ def build_qat_retinanet(
clone_function=_clone_function_for_fpn, clone_function=_clone_function_for_fpn,
) )
decoder = tfmot.quantization.keras.quantize_model(decoder) decoder = tfmot.quantization.keras.quantize_model(decoder)
decoder = tfmot.quantization.keras.remove_input_range(decoder)
head = model.head head = model.head
if quantization.quantize_detection_head: if quantization.quantize_detection_head:
......
...@@ -54,7 +54,7 @@ class SeparableConv2DQuantized(tf.keras.layers.Layer): ...@@ -54,7 +54,7 @@ class SeparableConv2DQuantized(tf.keras.layers.Layer):
depthwise_conv2d_quantized = helper.quantize_wrapped_layer( depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D, tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig( configs.Default8BitConvQuantizeConfig(
['depthwise_kernel'], ['activation'], True)) ['depthwise_kernel'], [], True))
conv2d_quantized = helper.quantize_wrapped_layer( conv2d_quantized = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D, tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig( configs.Default8BitConvQuantizeConfig(
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment