Unverified Commit 7479dbb8 authored by Srihari Humbarwadi's avatar Srihari Humbarwadi Committed by GitHub
Browse files

Merge branch 'tensorflow:master' into panoptic-deeplab-modeling

parents 8b60a5a8 9c8cbd0c
---
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
  loss_scale: 'dynamic'
task:
  model:
    num_classes: 1001
    input_size: [224, 224, 3]
    backbone:
      type: 'resnet'
      resnet:
        model_id: 50
  losses:
    l2_weight_decay: 0.0001
    one_hot: true
    label_smoothing: 0.1
  train_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/train*'
    is_training: true
    global_batch_size: 256
    dtype: 'float32'
  validation_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/valid*'
    is_training: false
    global_batch_size: 256
    dtype: 'float32'
    drop_remainder: false
  quantization:
    pretrained_original_checkpoint: 'gs://**/resnet_classifier_gpu/ckpt-56160'
trainer:
  # With below setting, the accuracy of QAT reaches to Top1-accuracy 0.7720 after 5 days of training
  # with 8GPUs, which is higher than the non-quantized float32 version Resnet.
  train_steps: 449280
  validation_steps: 200
  validation_interval: 5000
  steps_per_loop: 5000
  summary_interval: 5000
  checkpoint_interval: 5000
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    learning_rate:
      type: 'stepwise'
      stepwise:
        boundaries: [150000, 300000, 400000]
        values: [0.08, 0.008, 0.0008, 0.00008]
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 40000
---
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
  loss_scale: 'dynamic'
task:
  model:
    num_classes: 1001
    input_size: [224, 224, 3]
    backbone:
      type: 'resnet'
      resnet:
        model_id: 50
  losses:
    l2_weight_decay: 0.0001
    one_hot: true
    label_smoothing: 0.1
  train_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/train*'
    is_training: true
    global_batch_size: 256
    dtype: 'float32'
  validation_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/valid*'
    is_training: false
    global_batch_size: 256
    dtype: 'float32'
    drop_remainder: false
  quantization:
    pretrained_original_checkpoint: 'gs://**/resnet_classifier_gpu/ckpt-56160'
trainer:
  # With below setting, the accuracy of QAT reaches to the non-quantized float32 version after
  # around 160k steps, which takes 1d 15h with 8 GPUS.
  train_steps: 449280
  validation_steps: 200
  validation_interval: 5000
  steps_per_loop: 5000
  summary_interval: 5000
  checkpoint_interval: 5000
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    learning_rate:
      type: 'exponential'
      exponential:
        initial_learning_rate: 0.016
        decay_steps: 25000
        decay_rate: 0.5
        staircase: true
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 1000
---
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
  loss_scale: 'dynamic'
task:
  model:
    num_classes: 1001
    input_size: [224, 224, 3]
    backbone:
      type: 'resnet'
      resnet:
        model_id: 50
  losses:
    l2_weight_decay: 0.0001
    one_hot: true
    label_smoothing: 0.1
  train_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/train*'
    is_training: true
    global_batch_size: 256
    dtype: 'float32'
  validation_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/valid*'
    is_training: false
    global_batch_size: 256
    dtype: 'float32'
    drop_remainder: false
  quantization:
    pretrained_original_checkpoint: 'gs://**/resnet_classifier_gpu/ckpt-56160'
    change_num_bits: true
    num_bits_weight: 4
    num_bits_activation: 4
trainer:
  # With below setting, the accuracy of QAT reaches Top1-accuracy 0.6822 at 205k steps with 8GPUs.
  # TODO: Please change the configs when training is done.
  train_steps: 449280
  validation_steps: 200
  validation_interval: 5000
  steps_per_loop: 5000
  summary_interval: 5000
  checkpoint_interval: 5000
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    learning_rate:
      type: 'exponential'
      exponential:
        initial_learning_rate: 0.016
        decay_steps: 25000
        decay_rate: 0.5
        staircase: true
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 1000
---
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
  loss_scale: 'dynamic'
task:
  model:
    num_classes: 1001
    input_size: [224, 224, 3]
    backbone:
      type: 'resnet'
      resnet:
        model_id: 50
  losses:
    l2_weight_decay: 0.0001
    one_hot: true
    label_smoothing: 0.1
  train_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/train*'
    is_training: true
    global_batch_size: 256
    dtype: 'float32'
  validation_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/valid*'
    is_training: false
    global_batch_size: 256
    dtype: 'float32'
    drop_remainder: false
  quantization:
    pretrained_original_checkpoint: 'gs://**/resnet_classifier_gpu/ckpt-56160'
    change_num_bits: true
    num_bits_weight: 4
    num_bits_activation: 8
trainer:
  # With below setting, the accuracy of QAT reaches Top1-accuracy 0.7575 at 220k steps with 8GPUs.
  # TODO: Please change the configs when training is done.
  train_steps: 449280
  validation_steps: 200
  validation_interval: 5000
  steps_per_loop: 5000
  summary_interval: 5000
  checkpoint_interval: 5000
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    learning_rate:
      type: 'exponential'
      exponential:
        initial_learning_rate: 0.016
        decay_steps: 25000
        decay_rate: 0.5
        staircase: true
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 1000
---
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
  loss_scale: 'dynamic'
task:
  model:
    num_classes: 1001
    input_size: [224, 224, 3]
    backbone:
      type: 'resnet'
      resnet:
        model_id: 50
  losses:
    l2_weight_decay: 0.0001
    one_hot: true
    label_smoothing: 0.1
  train_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/train*'
    is_training: true
    global_batch_size: 256
    dtype: 'float32'
  validation_data:
    input_path: '/readahead/200M/placer/prod/home/distbelief/imagenet-tensorflow/imagenet-2012-tfrecord/valid*'
    is_training: false
    global_batch_size: 256
    dtype: 'float32'
    drop_remainder: false
  quantization:
    pretrained_original_checkpoint: 'gs://**/resnet_classifier_gpu/ckpt-56160'
    change_num_bits: true
    num_bits_weight: 6
    num_bits_activation: 6
trainer:
  # With below setting, the accuracy of QAT reaches Top1-accuracy 0.7607 at 190k steps with 8GPUs.
  # TODO: Please change the configs when training is done.
  train_steps: 449280
  validation_steps: 200
  validation_interval: 5000
  steps_per_loop: 5000
  summary_interval: 5000
  checkpoint_interval: 5000
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    learning_rate:
      type: 'exponential'
      exponential:
        initial_learning_rate: 0.016
        decay_steps: 25000
        decay_rate: 0.5
        staircase: true
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 1000
---
# --experiment_type=retinanet_spinenet_mobile_coco_qat
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
task:
  losses:
    l2_weight_decay: 3.0e-05
  model:
    anchor:
      anchor_size: 3
      aspect_ratios: [0.5, 1.0, 2.0]
      num_scales: 3
    backbone:
      spinenet_mobile:
        stochastic_depth_drop_rate: 0.2
        model_id: '49'
        se_ratio: 0.2
        use_keras_upsampling_2d: true
      type: 'spinenet_mobile'
    decoder:
      type: 'identity'
    head:
      num_convs: 4
      num_filters: 48
      use_separable_conv: true
    input_size: [384, 384, 3]
    max_level: 7
    min_level: 3
    norm_activation:
      activation: 'swish'
      norm_epsilon: 0.001
      norm_momentum: 0.99
      use_sync_bn: true
  train_data:
    dtype: 'float32'
    global_batch_size: 128
    is_training: true
    parser:
      aug_rand_hflip: true
      aug_scale_max: 2.0
      aug_scale_min: 0.5
  validation_data:
    dtype: 'float32'
    global_batch_size: 8
    is_training: false
  quantization:
    pretrained_original_checkpoint: 'gs://**/coco_spinenet49_mobile_tpu/ckpt-277200'
trainer:
  checkpoint_interval: 924
  optimizer_config:
    learning_rate:
      stepwise:
        boundaries: [531300, 545160]
        values: [0.0016, 0.00016, 0.000016]
      type: 'stepwise'
    warmup:
      linear:
        warmup_learning_rate: 0.0000335
        warmup_steps: 4000
  steps_per_loop: 924
  train_steps: 554400
  validation_interval: 924
  validation_steps: 1250
  summary_interval: 924
---
# --experiment_type=mnv2_deeplabv3_pascal_qat
# Use 8 v100 GPUs for training and 4 v100 GPUs for eval.
# mIoU (unquantized fp32): 74.78
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
  loss_scale: 'dynamic'
task:
  model:
    num_classes: 21
    input_size: [512, 512, 3]
    backbone:
      type: 'mobilenet'
      mobilenet:
        model_id: 'MobileNetV2'
        output_stride: 16
    decoder:
      aspp:
        dilation_rates: []
        level: 4
        pool_kernel_size: null
        output_tensor: true
      type: 'aspp'
    head:
      feature_fusion: null
      num_convs: 0
    norm_activation:
      activation: relu
      norm_epsilon: 0.001
      norm_momentum: 0.99
      use_sync_bn: true
  losses:
    l2_weight_decay: 4.0e-07  # 1/100 of original value.
  train_data:
    output_size: [512, 512]
    crop_size: [512, 512]
    input_path: 'gs://**/pascal_voc_seg/train_aug*'
    is_training: true
    global_batch_size: 16
    dtype: 'float32'
    aug_rand_hflip: true
    aug_scale_max: 2.0
    aug_scale_min: 0.5
  validation_data:
    output_size: [512, 512]
    input_path: 'gs://**/pascal_voc_seg/val*'
    is_training: false
    global_batch_size: 16
    dtype: 'float32'
    drop_remainder: false
    resize_eval_groundtruth: false
    groundtruth_padded_size: [512, 512]
  quantization:
    pretrained_original_checkpoint: 'gs://**/deeplabv3_mobilenetv2_pascal_coco_0.21/29808901/best_ckpt/best_ckpt-54'
  init_checkpoint: null
trainer:
  optimizer_config:
    learning_rate:
      polynomial:
        decay_steps: 13240
        initial_learning_rate: 0.00007  # 1/100 of original lr.
        power: 0.9
      type: polynomial
    optimizer:
      sgd:
        momentum: 0.9
      type: sgd
    warmup:
      linear:
        name: linear
        warmup_steps: 0  # No warmup
      type: linear
  best_checkpoint_eval_metric: 'mean_iou'
  best_checkpoint_export_subdir: 'best_ckpt'
  best_checkpoint_metric_comp: 'higher'
  steps_per_loop: 662
  summary_interval: 662
  train_steps: 13240
  validation_interval: 662
  validation_steps: 90
  checkpoint_interval: 662
---
# --experiment_type=mnv2_deeplabv3_pascal_qat
# Use 4x2 DF for training and eval.
# mIoU (unquantized fp32): 74.69
runtime:
  distribution_strategy: 'tpu'
  mixed_precision_dtype: 'float32'
task:
  model:
    num_classes: 21
    input_size: [512, 512, 3]
    backbone:
      type: 'mobilenet'
      mobilenet:
        model_id: 'MobileNetV2'
        output_stride: 16
    decoder:
      aspp:
        dilation_rates: []
        level: 4
        pool_kernel_size: null
        output_tensor: true
      type: 'aspp'
    head:
      feature_fusion: null
      num_convs: 0
    norm_activation:
      activation: relu
      norm_epsilon: 0.001
      norm_momentum: 0.99
      use_sync_bn: true
  losses:
    l2_weight_decay: 4.0e-07  # 1/100 of original value.
  train_data:
    output_size: [512, 512]
    crop_size: [512, 512]
    input_path: 'gs://**/pascal_voc_seg/train_aug*'
    is_training: true
    global_batch_size: 16
    dtype: 'float32'
    aug_rand_hflip: true
    aug_scale_max: 2.0
    aug_scale_min: 0.5
  validation_data:
    output_size: [512, 512]
    input_path: 'gs://**/pascal_voc_seg/val*'
    is_training: false
    global_batch_size: 16
    dtype: 'float32'
    drop_remainder: false
    resize_eval_groundtruth: false
    groundtruth_padded_size: [512, 512]
  quantization:
    pretrained_original_checkpoint: 'gs://**/deeplabv3_mobilenetv2_pascal_coco_0.21/29808901/best_ckpt/best_ckpt-54'
  init_checkpoint: null
trainer:
  optimizer_config:
    learning_rate:
      polynomial:
        decay_steps: 13240
        initial_learning_rate: 0.00007  # 1/100 of original lr.
        power: 0.9
      type: polynomial
    optimizer:
      sgd:
        momentum: 0.9
      type: sgd
    warmup:
      linear:
        name: linear
        warmup_steps: 0  # No warmup
      type: linear
  best_checkpoint_eval_metric: 'mean_iou'
  best_checkpoint_export_subdir: 'best_ckpt'
  best_checkpoint_metric_comp: 'higher'
  steps_per_loop: 662
  summary_interval: 662
  train_steps: 13240
  validation_interval: 662
  validation_steps: 90
  checkpoint_interval: 662
---
# --experiment_type=mnv2_deeplabv3plus_cityscapes_qat
# Use 4x2 DF for training and eval.
# mIoU (unquantized fp32): 73.84
runtime:
  distribution_strategy: 'tpu'
  mixed_precision_dtype: 'float32'
task:
  model:
    num_classes: 19
    input_size: [1024, 2048, 3]
    backbone:
      type: 'mobilenet'
      mobilenet:
        model_id: 'MobileNetV2'
        output_stride: 16
        output_intermediate_endpoints: true
    decoder:
      aspp:
        dilation_rates: []
        level: 4
        pool_kernel_size: [512, 1024]
        output_tensor: true
      type: 'aspp'
    head:
      feature_fusion: 'deeplabv3plus'
      low_level: '2/depthwise'
      low_level_num_filters: 48
      level: 4
      num_convs: 2
      use_depthwise_convolution: true
    norm_activation:
      activation: relu
      norm_epsilon: 0.001
      norm_momentum: 0.99
      use_sync_bn: true
  losses:
    l2_weight_decay: 4.0e-07  # 1/100 of original value.
  train_data:
    output_size: [1024, 2048]
    crop_size: []
    input_path: ''
    tfds_name: 'cityscapes/semantic_segmentation'
    tfds_split: 'train'
    is_training: true
    global_batch_size: 16
    dtype: 'float32'
    aug_rand_hflip: true
    aug_scale_max: 2.0
    aug_scale_min: 0.5
  validation_data:
    output_size: [1024, 2048]
    input_path: ''
    tfds_name: 'cityscapes/semantic_segmentation'
    tfds_split: 'validation'
    is_training: false
    global_batch_size: 16
    dtype: 'float32'
    drop_remainder: false
    resize_eval_groundtruth: true
  quantization:
    pretrained_original_checkpoint: 'gs://**/deeplabv3plus_mobilenetv2_cityscapes/29814723/best_ckpt/best_ckpt-408'
  init_checkpoint: null
trainer:
  optimizer_config:
    learning_rate:
      polynomial:
        decay_steps: 20000
        initial_learning_rate: 0.0001  # 1/100 of original lr.
        power: 0.9
      type: polynomial
    optimizer:
      sgd:
        momentum: 0.9
      type: sgd
    warmup:
      linear:
        name: linear
        warmup_learning_rate: 0
        warmup_steps: 0  # No warmup
      type: linear
  steps_per_loop: 185
  summary_interval: 185
  train_steps: 20000
  validation_interval: 185
  validation_steps: 31
  checkpoint_interval: 185
  best_checkpoint_export_subdir: 'best_ckpt'
  best_checkpoint_eval_metric: 'mean_iou'
  best_checkpoint_metric_comp: 'higher'
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Image classification configuration definition."""
import dataclasses
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.vision.beta.configs import image_classification
@dataclasses.dataclass
class ImageClassificationTask(image_classification.ImageClassificationTask):
  """Image classification task config extended with an optional QAT config."""
  # Quantization settings; when set, QAT is applied to the task's model.
  quantization: Optional[common.Quantization] = None
@exp_factory.register_config_factory('resnet_imagenet_qat')
def image_classification_imagenet() -> cfg.ExperimentConfig:
  """Builds an image classification config for the resnet with QAT."""
  # Start from the non-QAT ImageNet/ResNet experiment and swap the task in
  # place for the QAT-aware variant, preserving every base task setting.
  base_config = image_classification.image_classification_imagenet()
  base_config.task = ImageClassificationTask.from_args(
      quantization=common.Quantization(), **base_config.task.as_dict())
  return base_config
@exp_factory.register_config_factory('mobilenet_imagenet_qat')
def image_classification_imagenet_mobilenet() -> cfg.ExperimentConfig:
  """Builds an image classification config for the mobilenetV2 with QAT."""
  # Start from the non-QAT ImageNet/MobileNetV2 experiment and swap the task
  # in place for the QAT-aware variant, preserving every base task setting.
  base_config = image_classification.image_classification_imagenet_mobilenet()
  base_config.task = ImageClassificationTask.from_args(
      quantization=common.Quantization(), **base_config.task.as_dict())
  return base_config
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_classification."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import image_classification as qat_exp_cfg
from official.vision import beta
from official.vision.beta.configs import image_classification as exp_cfg
class ImageClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):
  """Smoke tests for the registered QAT image classification configs."""

  @parameterized.parameters(
      ('resnet_imagenet_qat',),
      ('mobilenet_imagenet_qat',),
  )
  def test_image_classification_configs(self, config_name):
    config = exp_factory.get_exp_config(config_name)
    self.assertIsInstance(config, cfg.ExperimentConfig)
    self.assertIsInstance(config.task, qat_exp_cfg.ImageClassificationTask)
    self.assertIsInstance(config.task.model,
                          exp_cfg.ImageClassificationModel)
    self.assertIsInstance(config.task.quantization, common.Quantization)
    self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
    # Validate the pristine config before mutating it, mirroring the sibling
    # retinanet / semantic segmentation config tests.
    config.validate()
    config.task.train_data.is_training = None
    # NOTE: 'inconsistncy' is misspelled in the underlying library's error
    # message; the regex must match that text verbatim.
    with self.assertRaisesRegex(KeyError, 'Found inconsistncy between key'):
      config.validate()
# Run this module's test cases under the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RetinaNet configuration definition."""
import dataclasses
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.vision.beta.configs import retinanet
from official.vision.beta.configs.google import backbones
@dataclasses.dataclass
class RetinaNetTask(retinanet.RetinaNetTask):
  """RetinaNet task config extended with an optional QAT config."""
  # Quantization settings; when set, QAT is applied to the task's model.
  quantization: Optional[common.Quantization] = None
@exp_factory.register_config_factory('retinanet_spinenet_mobile_coco_qat')
def retinanet_spinenet_mobile_coco() -> cfg.ExperimentConfig:
  """Generates a config for COCO OD RetinaNet for mobile with QAT."""
  # Start from the non-QAT mobile SpineNet RetinaNet experiment.
  config = retinanet.retinanet_spinenet_mobile_coco()
  # Rebuild the task as the QAT-aware variant with the same base settings.
  qat_task = RetinaNetTask.from_args(
      quantization=common.Quantization(), **config.task.as_dict())
  # Re-declare the backbone config so the mobile SpineNet settings are
  # carried explicitly on the QAT task.
  mobile_backbone = backbones.Backbone(
      type='spinenet_mobile',
      spinenet_mobile=backbones.SpineNetMobile(
          model_id='49',
          stochastic_depth_drop_rate=0.2,
          min_level=3,
          max_level=7,
          use_keras_upsampling_2d=True))
  qat_task.model.backbone = mobile_backbone
  config.task = qat_task
  return config
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for retinanet."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import retinanet as qat_exp_cfg
from official.vision import beta
from official.vision.beta.configs import retinanet as exp_cfg
class RetinaNetConfigTest(tf.test.TestCase, parameterized.TestCase):
  """Smoke tests for the registered QAT RetinaNet configs."""

  @parameterized.parameters(
      ('retinanet_spinenet_mobile_coco_qat',),
  )
  def test_retinanet_configs(self, config_name):
    experiment = exp_factory.get_exp_config(config_name)
    # The factory must yield a full experiment whose task was replaced by the
    # QAT-aware RetinaNet task variant.
    self.assertIsInstance(experiment, cfg.ExperimentConfig)
    self.assertIsInstance(experiment.task, qat_exp_cfg.RetinaNetTask)
    self.assertIsInstance(experiment.task.model, exp_cfg.RetinaNet)
    self.assertIsInstance(experiment.task.quantization, common.Quantization)
    self.assertIsInstance(experiment.task.train_data, exp_cfg.DataConfig)
    experiment.validate()
    # Clearing `is_training` must be rejected by validation. NOTE:
    # 'inconsistncy' is misspelled in the underlying library's error message;
    # the regex must match that text verbatim.
    experiment.task.train_data.is_training = None
    with self.assertRaisesRegex(KeyError, 'Found inconsistncy between key'):
      experiment.validate()
# Run this module's test cases under the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RetinaNet configuration definition."""
import dataclasses
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.vision.beta.configs import semantic_segmentation
@dataclasses.dataclass
class SemanticSegmentationTask(semantic_segmentation.SemanticSegmentationTask):
  """Semantic segmentation task config extended with an optional QAT config."""
  # Quantization settings; when set, QAT is applied to the task's model.
  quantization: Optional[common.Quantization] = None
@exp_factory.register_config_factory('mnv2_deeplabv3_pascal_qat')
def mnv2_deeplabv3_pascal() -> cfg.ExperimentConfig:
  """Generates a config for MobileNet v2 + deeplab v3 with QAT."""
  # Start from the non-QAT experiment and swap the task in place for the
  # QAT-aware variant, preserving every base task setting.
  base = semantic_segmentation.mnv2_deeplabv3_pascal()
  base.task = SemanticSegmentationTask.from_args(
      quantization=common.Quantization(), **base.task.as_dict())
  return base
@exp_factory.register_config_factory('mnv2_deeplabv3_cityscapes_qat')
def mnv2_deeplabv3_cityscapes() -> cfg.ExperimentConfig:
  """Generates a config for MobileNet v2 + deeplab v3 with QAT."""
  # Start from the non-QAT experiment and swap the task in place for the
  # QAT-aware variant, preserving every base task setting.
  base = semantic_segmentation.mnv2_deeplabv3_cityscapes()
  base.task = SemanticSegmentationTask.from_args(
      quantization=common.Quantization(), **base.task.as_dict())
  return base
@exp_factory.register_config_factory('mnv2_deeplabv3plus_cityscapes_qat')
def mnv2_deeplabv3plus_cityscapes() -> cfg.ExperimentConfig:
  """Generates a config for MobileNet v2 + deeplab v3+ with QAT."""
  # Start from the non-QAT experiment and swap the task in place for the
  # QAT-aware variant, preserving every base task setting.
  base = semantic_segmentation.mnv2_deeplabv3plus_cityscapes()
  base.task = SemanticSegmentationTask.from_args(
      quantization=common.Quantization(), **base.task.as_dict())
  return base
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for retinanet."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import semantic_segmentation as qat_exp_cfg
from official.vision import beta
from official.vision.beta.configs import semantic_segmentation as exp_cfg
class SemanticSegmentationConfigTest(tf.test.TestCase, parameterized.TestCase):
  """Smoke tests for the registered QAT semantic segmentation configs."""

  # The third entry previously lacked a trailing comma, making it a bare
  # parenthesized string rather than a 1-tuple; it only worked because absl's
  # parameterized special-cases strings. Use a proper tuple for consistency.
  @parameterized.parameters(('mnv2_deeplabv3_pascal_qat',),
                            ('mnv2_deeplabv3_cityscapes_qat',),
                            ('mnv2_deeplabv3plus_cityscapes_qat',))
  def test_semantic_segmentation_configs(self, config_name):
    config = exp_factory.get_exp_config(config_name)
    self.assertIsInstance(config, cfg.ExperimentConfig)
    self.assertIsInstance(config.task, qat_exp_cfg.SemanticSegmentationTask)
    self.assertIsInstance(config.task.model, exp_cfg.SemanticSegmentationModel)
    self.assertIsInstance(config.task.quantization, common.Quantization)
    self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
    config.validate()
    config.task.train_data.is_training = None
    # NOTE: 'inconsistncy' is misspelled in the underlying library's error
    # message; the regex must match that text verbatim.
    with self.assertRaisesRegex(KeyError, 'Found inconsistncy between key'):
      config.validate()
# Run this module's test cases under the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Modeling package definition."""
from official.projects.qat.vision.modeling import layers
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
# Import libraries
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.modeling import segmentation_model as qat_segmentation_model
from official.projects.qat.vision.n_bit import schemes as n_bit_schemes
from official.projects.qat.vision.quantization import schemes
from official.vision.beta import configs
from official.vision.beta.modeling import classification_model
from official.vision.beta.modeling import retinanet_model
from official.vision.beta.modeling.decoders import aspp
from official.vision.beta.modeling.heads import segmentation_heads
from official.vision.beta.modeling.layers import nn_layers
def build_qat_classification_model(
    model: tf.keras.Model,
    quantization: common.Quantization,
    input_specs: tf.keras.layers.InputSpec,
    model_config: configs.image_classification.ImageClassificationModel,
    l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model:  # pytype: disable=annotation-type-mismatch # typed-keras
  """Applies quantization aware training to a classification model.

  The backbone is annotated and quantized as a standalone Keras model first;
  the classification model is then rebuilt around the quantized backbone and
  its head layers (Dense / Dropout / GlobalAveragePooling2D) are annotated
  via `clone_model` and quantized in a second pass.

  Args:
    model: The float model that model optimization techniques are applied to.
    quantization: The Quantization config (pretrained checkpoint to restore
      and optional custom bit widths).
    input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
    model_config: The image classification model config.
    l2_regularizer: tf.keras.regularizers.Regularizer object passed to the
      rebuilt classification model. Default to None.

  Returns:
    model: The model that applied optimization techniques.
  """
  # Restore pretrained float weights (when configured) before any
  # quantization annotation, so QAT fine-tunes from the float checkpoint.
  original_checkpoint = quantization.pretrained_original_checkpoint
  if original_checkpoint:
    ckpt = tf.train.Checkpoint(
        model=model,
        **model.checkpoint_items)
    status = ckpt.read(original_checkpoint)
    status.expect_partial().assert_existing_objects_matched()

  # Custom objects that must be resolvable while layers are cloned or
  # deserialized inside `quantize_scope`.
  scope_dict = {
      'L2': tf.keras.regularizers.l2,
  }
  # Pass 1: quantize the backbone as a whole model.
  with tfmot.quantization.keras.quantize_scope(scope_dict):
    annotated_backbone = tfmot.quantization.keras.quantize_annotate_model(
        model.backbone)
    if quantization.change_num_bits:
      # Custom n-bit scheme when non-default bit widths are requested.
      backbone = tfmot.quantization.keras.quantize_apply(
          annotated_backbone,
          scheme=n_bit_schemes.DefaultNBitQuantizeScheme(
              num_bits_weight=quantization.num_bits_weight,
              num_bits_activation=quantization.num_bits_activation))
    else:
      backbone = tfmot.quantization.keras.quantize_apply(
          annotated_backbone,
          scheme=schemes.Default8BitQuantizeScheme())
  # Rebuild the classification model around the quantized backbone, copying
  # the head hyperparameters from the model config.
  norm_activation_config = model_config.norm_activation
  backbone_optimized_model = classification_model.ClassificationModel(
      backbone=backbone,
      num_classes=model_config.num_classes,
      input_specs=input_specs,
      dropout_rate=model_config.dropout_rate,
      kernel_regularizer=l2_regularizer,
      add_head_batch_norm=model_config.add_head_batch_norm,
      use_sync_bn=norm_activation_config.use_sync_bn,
      norm_momentum=norm_activation_config.norm_momentum,
      norm_epsilon=norm_activation_config.norm_epsilon)

  # Copy the weights of every non-backbone layer from the original model into
  # the rebuilt one; the backbone entry is skipped (it was handled above).
  # NOTE(review): this relies on `model.layers` and
  # `backbone_optimized_model.layers` being positionally parallel.
  for from_layer, to_layer in zip(
      model.layers, backbone_optimized_model.layers):
    if from_layer != model.backbone:
      to_layer.set_weights(from_layer.get_weights())

  # Pass 2: annotate and quantize the head layers.
  with tfmot.quantization.keras.quantize_scope(scope_dict):
    def apply_quantization_to_dense(layer):
      # Annotate only head layer types; all other layers pass through as-is.
      if isinstance(layer, (tf.keras.layers.Dense,
                            tf.keras.layers.Dropout,
                            tf.keras.layers.GlobalAveragePooling2D)):
        return tfmot.quantization.keras.quantize_annotate_layer(layer)
      return layer

    annotated_model = tf.keras.models.clone_model(
        backbone_optimized_model,
        clone_function=apply_quantization_to_dense,
    )

    if quantization.change_num_bits:
      optimized_model = tfmot.quantization.keras.quantize_apply(
          annotated_model,
          scheme=n_bit_schemes.DefaultNBitQuantizeScheme(
              num_bits_weight=quantization.num_bits_weight,
              num_bits_activation=quantization.num_bits_activation))
    else:
      # NOTE(review): no explicit scheme is passed here (unlike the backbone
      # path above); this relies on tfmot's default scheme — confirm intended.
      optimized_model = tfmot.quantization.keras.quantize_apply(
          annotated_model)
  return optimized_model
def build_qat_retinanet(
    model: tf.keras.Model, quantization: common.Quantization,
    model_config: configs.retinanet.RetinaNet) -> tf.keras.Model:
  """Applies quantization aware training for RetinaNet model.

  Only the backbone is quantized; the decoder, head and detection generator
  of the input model are reused unchanged in the rebuilt model.

  Args:
    model: The float RetinaNet model that quantization aware training is
      applied to.
    quantization: The Quantization config (pretrained checkpoint to restore).
    model_config: The RetinaNet model config (level range and anchor
      settings for the rebuilt model).

  Returns:
    The model that applied optimization techniques.
  """
  # Restore pretrained float weights (when configured) before any
  # quantization annotation, so QAT fine-tunes from the float checkpoint.
  original_checkpoint = quantization.pretrained_original_checkpoint
  if original_checkpoint is not None:
    ckpt = tf.train.Checkpoint(
        model=model,
        **model.checkpoint_items)
    status = ckpt.read(original_checkpoint)
    status.expect_partial().assert_existing_objects_matched()

  # Custom objects that must be resolvable inside `quantize_scope`.
  scope_dict = {
      'L2': tf.keras.regularizers.l2,
  }
  # Quantize the backbone as a standalone Keras model with the default
  # 8-bit scheme.
  with tfmot.quantization.keras.quantize_scope(scope_dict):
    annotated_backbone = tfmot.quantization.keras.quantize_annotate_model(
        model.backbone)
    optimized_backbone = tfmot.quantization.keras.quantize_apply(
        annotated_backbone,
        scheme=schemes.Default8BitQuantizeScheme())
  # Rebuild RetinaNet around the quantized backbone, reusing the original
  # decoder, head and detection generator.
  optimized_model = retinanet_model.RetinaNetModel(
      optimized_backbone,
      model.decoder,
      model.head,
      model.detection_generator,
      min_level=model_config.min_level,
      max_level=model_config.max_level,
      num_scales=model_config.anchor.num_scales,
      aspect_ratios=model_config.anchor.aspect_ratios,
      anchor_size=model_config.anchor.anchor_size)
  return optimized_model
def build_qat_segmentation_model(
    model: tf.keras.Model, quantization: common.Quantization,
    input_specs: tf.keras.layers.InputSpec) -> tf.keras.Model:
  """Applies quantization aware training (QAT) to a segmentation model.

  Pipeline: optionally restore pretrained float weights, rebuild the model as
  a quantization-compatible SegmentationModelQuantized, quantize the backbone,
  copy the remaining float weights over, then annotate and quantize the
  segmentation head / ASPP layers.

  Args:
    model: The float segmentation model to apply QAT to.
    quantization: The Quantization config.
    input_specs: The shape specification of the input tensor.

  Returns:
    The segmentation model with QAT applied.
  """
  original_checkpoint = quantization.pretrained_original_checkpoint
  if original_checkpoint is not None:
    # Restore pretrained float weights so QAT fine-tunes from them.
    ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
    status = ckpt.read(original_checkpoint)
    status.expect_partial().assert_existing_objects_matched()
  # Build quantization compatible model.
  model = qat_segmentation_model.SegmentationModelQuantized(
      model.backbone, model.decoder, model.head, input_specs)
  # Custom objects that must be deserializable while Keras clones the model
  # under quantization annotation.
  scope_dict = {
      'L2': tf.keras.regularizers.l2,
  }
  # Apply QAT to backbone (a tf.keras.Model) first.
  with tfmot.quantization.keras.quantize_scope(scope_dict):
    annotated_backbone = tfmot.quantization.keras.quantize_annotate_model(
        model.backbone)
    optimized_backbone = tfmot.quantization.keras.quantize_apply(
        annotated_backbone, scheme=schemes.Default8BitQuantizeScheme())
  backbone_optimized_model = qat_segmentation_model.SegmentationModelQuantized(
      optimized_backbone, model.decoder, model.head, input_specs)
  # Copy over all remaining layers. NOTE(review): this relies on both models
  # exposing their layers in the same order; only the (already quantized)
  # backbone is skipped.
  for from_layer, to_layer in zip(model.layers,
                                  backbone_optimized_model.layers):
    if from_layer != model.backbone:
      to_layer.set_weights(from_layer.get_weights())
  with tfmot.quantization.keras.quantize_scope(scope_dict):

    def apply_quantization_to_layers(layer):
      # Annotate only head/ASPP layers for quantization; every other layer is
      # cloned unchanged.
      if isinstance(layer, (segmentation_heads.SegmentationHead,
                            nn_layers.SpatialPyramidPooling, aspp.ASPP)):
        return tfmot.quantization.keras.quantize_annotate_layer(layer)
      return layer

    annotated_model = tf.keras.models.clone_model(
        backbone_optimized_model,
        clone_function=apply_quantization_to_layers,
    )
    optimized_model = tfmot.quantization.keras.quantize_apply(
        annotated_model, scheme=schemes.Default8BitQuantizeScheme())
  return optimized_model
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.modeling import factory as qat_factory
from official.vision.beta.configs import backbones
from official.vision.beta.configs import decoders
from official.vision.beta.configs import image_classification as classification_cfg
from official.vision.beta.configs import retinanet as retinanet_cfg
from official.vision.beta.configs import semantic_segmentation as semantic_segmentation_cfg
from official.vision.beta.modeling import factory
class ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Smoke tests: a float classifier can be wrapped with QAT."""

  @parameterized.parameters(
      ('resnet', (224, 224), 5e-5),
      ('resnet', (224, 224), None),
      ('resnet', (None, None), 5e-5),
      ('resnet', (None, None), None),
      ('mobilenet', (224, 224), 5e-5),
      ('mobilenet', (224, 224), None),
      ('mobilenet', (None, None), 5e-5),
      ('mobilenet', (None, None), None),
  )
  def test_builder(self, backbone_type, input_size, weight_decay):
    specs = tf.keras.layers.InputSpec(
        shape=[None] + list(input_size) + [3])
    # Two-class head on top of the requested backbone.
    config = classification_cfg.ImageClassificationModel(
        num_classes=2,
        backbone=backbones.Backbone(type=backbone_type))
    regularizer = None
    if weight_decay:
      regularizer = tf.keras.regularizers.l2(weight_decay)
    float_model = factory.build_classification_model(
        input_specs=specs,
        model_config=config,
        l2_regularizer=regularizer)
    # Building the QAT variant must not raise.
    _ = qat_factory.build_qat_classification_model(
        model=float_model,
        input_specs=specs,
        quantization=common.Quantization(),
        model_config=config,
        l2_regularizer=regularizer)
class RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Smoke tests: a float RetinaNet can be wrapped with QAT."""

  @parameterized.parameters(
      ('spinenet_mobile', (640, 640), False),
  )
  def test_builder(self, backbone_type, input_size, has_attribute_heads):
    specs = tf.keras.layers.InputSpec(
        shape=[None] + list(input_size) + [3])
    # Optional extra heads: one regression head (all defaults) and one
    # classification head.
    attribute_heads_config = None
    if has_attribute_heads:
      attribute_heads_config = [
          retinanet_cfg.AttributeHead(name='att1'),
          retinanet_cfg.AttributeHead(
              name='att2', type='classification', size=2),
      ]
    config = retinanet_cfg.RetinaNet(
        num_classes=2,
        backbone=backbones.Backbone(
            type=backbone_type,
            spinenet_mobile=backbones.SpineNetMobile(
                model_id='49',
                stochastic_depth_drop_rate=0.2,
                min_level=3,
                max_level=7,
                use_keras_upsampling_2d=True)),
        head=retinanet_cfg.RetinaNetHead(
            attribute_heads=attribute_heads_config))
    float_model = factory.build_retinanet(
        input_specs=specs,
        model_config=config,
        l2_regularizer=tf.keras.regularizers.l2(5e-5))
    _ = qat_factory.build_qat_retinanet(
        model=float_model,
        quantization=common.Quantization(),
        model_config=config)
    if has_attribute_heads:
      # Building the QAT model must not mutate the attribute-head configs.
      self.assertEqual(config.head.attribute_heads[0].as_dict(),
                       dict(name='att1', type='regression', size=1))
      self.assertEqual(config.head.attribute_heads[1].as_dict(),
                       dict(name='att2', type='classification', size=2))
class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Smoke tests: float segmentation models can be wrapped with QAT."""

  @parameterized.parameters(
      ('mobilenet', (512, 512), 5e-5),)
  def test_deeplabv3_builder(self, backbone_type, input_size, weight_decay):
    # 21 classes matches the PASCAL VOC label set (20 classes + background).
    num_classes = 21
    input_specs = tf.keras.layers.InputSpec(
        shape=[None, input_size[0], input_size[1], 3])
    # DeepLabV3-style config: MobileNetV2 backbone at output stride 16 with an
    # ASPP decoder feeding the segmentation head.
    model_config = semantic_segmentation_cfg.SemanticSegmentationModel(
        num_classes=num_classes,
        backbone=backbones.Backbone(
            type=backbone_type,
            mobilenet=backbones.MobileNet(
                model_id='MobileNetV2', output_stride=16)),
        decoder=decoders.Decoder(
            type='aspp',
            aspp=decoders.ASPP(
                level=4,
                num_filters=256,
                dilation_rates=[],
                spp_layer_version='v1',
                output_tensor=True)),
        head=semantic_segmentation_cfg.SegmentationHead(
            level=4,
            low_level=2,
            num_convs=1,
            upsample_factor=2,
            use_depthwise_convolution=True))
    l2_regularizer = (
        tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
    model = factory.build_segmentation_model(
        input_specs=input_specs,
        model_config=model_config,
        l2_regularizer=l2_regularizer)
    quantization_config = common.Quantization()
    # Building the QAT variant must not raise.
    _ = qat_factory.build_qat_segmentation_model(
        model=model, quantization=quantization_config, input_specs=input_specs)

  @parameterized.parameters(
      ('mobilenet', (512, 1024), 5e-5),)
  def test_deeplabv3plus_builder(self, backbone_type, input_size, weight_decay):
    # 19 classes matches the Cityscapes label set.
    num_classes = 19
    input_specs = tf.keras.layers.InputSpec(
        shape=[None, input_size[0], input_size[1], 3])
    # DeepLabV3+-style config: like V3 but the head also fuses a low-level
    # intermediate backbone endpoint ('2/depthwise').
    model_config = semantic_segmentation_cfg.SemanticSegmentationModel(
        num_classes=num_classes,
        backbone=backbones.Backbone(
            type=backbone_type,
            mobilenet=backbones.MobileNet(
                model_id='MobileNetV2',
                output_stride=16,
                # Expose intermediate endpoints for the low-level fusion.
                output_intermediate_endpoints=True)),
        decoder=decoders.Decoder(
            type='aspp',
            aspp=decoders.ASPP(
                level=4,
                num_filters=256,
                dilation_rates=[],
                pool_kernel_size=[512, 1024],
                use_depthwise_convolution=False,
                spp_layer_version='v1',
                output_tensor=True)),
        head=semantic_segmentation_cfg.SegmentationHead(
            level=4,
            num_convs=2,
            feature_fusion='deeplabv3plus',
            use_depthwise_convolution=True,
            low_level='2/depthwise',
            low_level_num_filters=48,
            prediction_kernel_size=1,
            upsample_factor=1,
            num_filters=256))
    l2_regularizer = (
        tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
    model = factory.build_segmentation_model(
        input_specs=input_specs,
        model_config=model_config,
        l2_regularizer=l2_regularizer)
    quantization_config = common.Quantization()
    # Building the QAT variant must not raise.
    _ = qat_factory.build_qat_segmentation_model(
        model=model, quantization=quantization_config, input_specs=input_specs)
# Run all test cases via the TensorFlow test runner when executed directly.
if __name__ == '__main__':
  tf.test.main()
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Layers package definition."""
from official.projects.qat.vision.modeling.layers.nn_blocks import BottleneckBlockQuantized
from official.projects.qat.vision.modeling.layers.nn_blocks import Conv2DBNBlockQuantized
from official.projects.qat.vision.modeling.layers.nn_blocks import InvertedBottleneckBlockQuantized
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment