Commit 0f9f7c8e authored by Fan Yang's avatar Fan Yang Committed by A. Unique TensorFlower
Browse files

Internal change

PiperOrigin-RevId: 435078736
parent 2ce9cf2b
......@@ -18,10 +18,10 @@
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.pruning.configs import image_classification as pruning_exp_cfg
from official.vision import beta
from official.vision.configs import image_classification as exp_cfg
......
......@@ -22,13 +22,13 @@ from absl.testing import parameterized
import numpy as np
import orbit
import tensorflow as tf
import tensorflow_model_optimization as tfmot
import tensorflow_model_optimization as tfmot
from official import vision
from official.core import actions
from official.core import exp_factory
from official.modeling import optimization
from official.projects.pruning.tasks import image_classification as img_cls_task
from official.vision import beta
class ImageClassificationTaskTest(tf.test.TestCase, parameterized.TestCase):
......
......@@ -17,11 +17,11 @@
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import image_classification as qat_exp_cfg
from official.vision import beta
from official.vision.configs import image_classification as exp_cfg
......
......@@ -17,11 +17,11 @@
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import retinanet as qat_exp_cfg
from official.vision import beta
from official.vision.configs import retinanet as exp_cfg
......
......@@ -17,11 +17,11 @@
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import semantic_segmentation as qat_exp_cfg
from official.vision import beta
from official.vision.configs import semantic_segmentation as exp_cfg
......
......@@ -199,23 +199,7 @@ class QuantizeLayoutTransform(
'Vision>Conv2DBNBlock',
nn_blocks.Conv2DBNBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
# TODO(yeqing): Remove the `Beta` custom layers.
CustomLayerQuantize(
'Beta>BottleneckBlock',
nn_blocks.BottleneckBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
CustomLayerQuantize(
'Beta>InvertedBottleneckBlock',
nn_blocks.InvertedBottleneckBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
CustomLayerQuantize(
'Beta>Conv2DBNBlock',
nn_blocks.Conv2DBNBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
num_bits_activation=self._num_bits_activation)
]
return _ModelTransformer(model, transforms, set(layer_quantize_map.keys()),
layer_quantize_map).transform()
......
......@@ -102,10 +102,7 @@ class CustomLayerQuantize(
if bottleneck_layer['class_name'] in [
'Vision>Conv2DBNBlock', 'Vision>InvertedBottleneckBlock',
'Vision>SegmentationHead', 'Vision>SpatialPyramidPooling',
'Vision>ASPP',
# TODO(yeqing): Removes the Beta layers.
'Beta>Conv2DBNBlock', 'Beta>InvertedBottleneckBlock',
'Beta>SegmentationHead', 'Beta>SpatialPyramidPooling', 'Beta>ASPP'
'Vision>ASPP'
]:
layer_metadata = {'quantize_config': configs.NoOpQuantizeConfig()}
else:
......@@ -170,20 +167,7 @@ class QuantizeLayoutTransform(
quantized_nn_layers.SegmentationHeadQuantized),
CustomLayerQuantize('Vision>SpatialPyramidPooling',
quantized_nn_layers.SpatialPyramidPoolingQuantized),
CustomLayerQuantize('Vision>ASPP', quantized_nn_layers.ASPPQuantized),
# TODO(yeqing): Remove the `Beta` components.
CustomLayerQuantize('Beta>BottleneckBlock',
quantized_nn_blocks.BottleneckBlockQuantized),
CustomLayerQuantize(
'Beta>InvertedBottleneckBlock',
quantized_nn_blocks.InvertedBottleneckBlockQuantized),
CustomLayerQuantize('Beta>Conv2DBNBlock',
quantized_nn_blocks.Conv2DBNBlockQuantized),
CustomLayerQuantize('Beta>SegmentationHead',
quantized_nn_layers.SegmentationHeadQuantized),
CustomLayerQuantize('Beta>SpatialPyramidPooling',
quantized_nn_layers.SpatialPyramidPoolingQuantized),
CustomLayerQuantize('Beta>ASPP', quantized_nn_layers.ASPPQuantized)
CustomLayerQuantize('Vision>ASPP', quantized_nn_layers.ASPPQuantized)
]
return tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer(
model, transforms,
......
......@@ -19,10 +19,10 @@ from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.qat.vision.tasks import image_classification as img_cls_task
from official.vision import beta
class ImageClassificationTaskTest(tf.test.TestCase, parameterized.TestCase):
......
......@@ -19,10 +19,10 @@ from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.qat.vision.tasks import retinanet
from official.vision import beta
from official.vision.configs import retinanet as exp_cfg
......
Markdown is supported
0% Try again or attach a new file.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.