Commit ad710aa1 authored by Allen Wang, committed by A. Unique TensorFlower

Move ResNet-related files to the ResNet folder (e.g. utils, CTL, CFit, etc.)

PiperOrigin-RevId: 301185629
parent adc01cd7
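For downstream code, the practical effect of this move is a change in import paths: ResNet-specific modules that previously lived directly under official/vision/image_classification now live in its resnet subpackage. A minimal before/after sketch, using module names taken from the diffs below:

# Before this commit: modules imported from the package root.
from official.vision.image_classification import common
from official.vision.image_classification import imagenet_preprocessing

# After this commit: the same modules are imported from the resnet subpackage.
from official.vision.image_classification.resnet import common
from official.vision.image_classification.resnet import imagenet_preprocessing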
@@ -23,7 +23,7 @@ import tensorflow as tf  # pylint: disable=g-bad-import-order
 from official.benchmark import keras_benchmark
 from official.utils.testing import benchmark_wrappers
-from official.vision.image_classification import resnet_imagenet_main
+from official.vision.image_classification.resnet import resnet_imagenet_main
 MIN_TOP_1_ACCURACY = 0.76
 MAX_TOP_1_ACCURACY = 0.77
@@ -27,8 +27,8 @@ from official.utils.flags import core as flags_core
 from official.utils.logs import logger
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
-from official.vision.image_classification import cifar_preprocessing
-from official.vision.image_classification import common
+from official.vision.image_classification.resnet import cifar_preprocessing
+from official.vision.image_classification.resnet import common
 LR_SCHEDULE = [  # (multiplier, epoch to start) tuples
@@ -27,7 +27,7 @@ from tensorflow.python.platform import googletest
 from official.benchmark.models import resnet_cifar_main
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
-from official.vision.image_classification import cifar_preprocessing
+from official.vision.image_classification.resnet import cifar_preprocessing
 class KerasCifarTest(googletest.TestCase):
@@ -22,8 +22,8 @@ import time
 from absl import flags
 import tensorflow as tf
-from official.vision.image_classification import common
-from official.vision.image_classification import resnet_ctl_imagenet_main
+from official.vision.image_classification.resnet import common
+from official.vision.image_classification.resnet import resnet_ctl_imagenet_main
 from official.utils.testing.perfzero_benchmark import PerfZeroBenchmark
 from official.utils.testing import benchmark_wrappers
 from official.utils.flags import core as flags_core
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the common module."""
from __future__ import absolute_import
from __future__ import print_function
# pylint: disable=g-bad-import-order
from mock import Mock
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import googletest
from official.utils.misc import keras_utils
from official.vision.image_classification import common
class KerasCommonTests(tf.test.TestCase):
"""Tests for common."""
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
super(KerasCommonTests, cls).setUpClass()
def test_build_stats(self):
history = self._build_history(1.145, cat_accuracy=.99988)
eval_output = self._build_eval_output(.56432111, 5.990)
th = keras_utils.TimeHistory(128, 100)
th.timestamp_log = [keras_utils.BatchTimestamp(0, 1),
keras_utils.BatchTimestamp(1, 2),
keras_utils.BatchTimestamp(2, 3)]
th.train_finish_time = 12345
stats = common.build_stats(history, eval_output, [th])
self.assertEqual(1.145, stats['loss'])
self.assertEqual(.99988, stats['training_accuracy_top_1'])
self.assertEqual(.56432111, stats['accuracy_top_1'])
self.assertEqual(5.990, stats['eval_loss'])
self.assertEqual(3, stats['step_timestamp_log'][2].timestamp)
self.assertEqual(12345, stats['train_finish_time'])
def test_build_stats_sparse(self):
history = self._build_history(1.145, cat_accuracy_sparse=.99988)
eval_output = self._build_eval_output(.928, 1.9844)
stats = common.build_stats(history, eval_output, None)
self.assertEqual(1.145, stats['loss'])
self.assertEqual(.99988, stats['training_accuracy_top_1'])
self.assertEqual(.928, stats['accuracy_top_1'])
self.assertEqual(1.9844, stats['eval_loss'])
def test_time_history(self):
th = keras_utils.TimeHistory(batch_size=128, log_steps=3)
th.on_train_begin()
th.on_batch_begin(0)
th.on_batch_end(0)
th.on_batch_begin(1)
th.on_batch_end(1)
th.on_batch_begin(2)
th.on_batch_end(2)
th.on_batch_begin(3)
th.on_batch_end(3)
th.on_batch_begin(4)
th.on_batch_end(4)
th.on_batch_begin(5)
th.on_batch_end(5)
th.on_batch_begin(6)
th.on_batch_end(6)
th.on_train_end()
self.assertEqual(3, len(th.timestamp_log))
def _build_history(self, loss, cat_accuracy=None,
cat_accuracy_sparse=None):
history_p = Mock()
history = {}
history_p.history = history
history['loss'] = [np.float64(loss)]
if cat_accuracy:
history['categorical_accuracy'] = [np.float64(cat_accuracy)]
if cat_accuracy_sparse:
history['sparse_categorical_accuracy'] = [np.float64(cat_accuracy_sparse)]
return history_p
def _build_eval_output(self, top_1, eval_loss):
eval_output = [np.float64(eval_loss), np.float64(top_1)]
return eval_output
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
googletest.main()
@@ -28,7 +28,7 @@ import tensorflow_datasets as tfds
 from official.utils.flags import core as flags_core
 from official.utils.misc import distribution_utils
 from official.utils.misc import model_helpers
-from official.vision.image_classification import common
+from official.vision.image_classification.resnet import common
 FLAGS = flags.FLAGS
@@ -22,7 +22,7 @@ import os
 from absl import logging
 import tensorflow as tf
-from official.vision.image_classification import imagenet_preprocessing
+from official.vision.image_classification.resnet import imagenet_preprocessing
 HEIGHT = 32
 WIDTH = 32
@@ -30,9 +30,9 @@ from official.utils.logs import logger
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
 from official.utils.misc import model_helpers
-from official.vision.image_classification import common
-from official.vision.image_classification import imagenet_preprocessing
-from official.vision.image_classification import resnet_runnable
+from official.vision.image_classification.resnet import common
+from official.vision.image_classification.resnet import imagenet_preprocessing
+from official.vision.image_classification.resnet import resnet_runnable
 flags.DEFINE_boolean(name='use_tf_function', default=True,
                      help='Wrap the train and test step inside a '
@@ -34,8 +34,8 @@ from official.utils.logs import logger
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
 from official.utils.misc import model_helpers
-from official.vision.image_classification import common
-from official.vision.image_classification import imagenet_preprocessing
+from official.vision.image_classification.resnet import common
+from official.vision.image_classification.resnet import imagenet_preprocessing
 from official.vision.image_classification.resnet import resnet_model
@@ -33,7 +33,7 @@ from tensorflow.python.keras import backend
 from tensorflow.python.keras import initializers
 from tensorflow.python.keras import models
 from tensorflow.python.keras import regularizers
-from official.vision.image_classification import imagenet_preprocessing
+from official.vision.image_classification.resnet import imagenet_preprocessing
 L2_WEIGHT_DECAY = 1e-4
 BATCH_NORM_DECAY = 0.9
@@ -25,8 +25,8 @@ from official.staging.training import grad_utils
 from official.staging.training import standard_runnable
 from official.staging.training import utils
 from official.utils.flags import core as flags_core
-from official.vision.image_classification import common
-from official.vision.image_classification import imagenet_preprocessing
+from official.vision.image_classification.resnet import common
+from official.vision.image_classification.resnet import imagenet_preprocessing
 from official.vision.image_classification.resnet import resnet_model
@@ -26,7 +26,7 @@ from absl import flags
 import tensorflow.compat.v2 as tf
-from official.vision.image_classification import imagenet_preprocessing
+from official.vision.image_classification.resnet import imagenet_preprocessing
 from official.vision.image_classification.resnet import resnet_model
 FLAGS = flags.FLAGS
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test the ResNet model with ImageNet data using CTL."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import os
import tensorflow.compat.v2 as tf
from tensorflow.python.eager import context
from official.utils.testing import integration
from official.vision.image_classification import common
from official.vision.image_classification import imagenet_preprocessing
from official.vision.image_classification import resnet_ctl_imagenet_main
class CtlImagenetTest(tf.test.TestCase):
"""Unit tests for Keras ResNet with ImageNet using CTL."""
_extra_flags = [
'-batch_size', '4',
'-train_steps', '4',
'-use_synthetic_data', 'true'
]
_tempdir = None
def get_temp_dir(self):
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(
dir=super(CtlImagenetTest, self).get_temp_dir())
return self._tempdir
@classmethod
def setUpClass(cls):
super(CtlImagenetTest, cls).setUpClass()
common.define_keras_flags()
def setUp(self):
super(CtlImagenetTest, self).setUp()
imagenet_preprocessing.NUM_IMAGES['validation'] = 4
self.policy = \
tf.compat.v2.keras.mixed_precision.experimental.global_policy()
def tearDown(self):
super(CtlImagenetTest, self).tearDown()
tf.io.gfile.rmtree(self.get_temp_dir())
tf.compat.v2.keras.mixed_precision.experimental.set_policy(self.policy)
def test_end_to_end_no_dist_strat(self):
"""Test Keras model with 1 GPU, no distribution strategy."""
model_dir = os.path.join(self.get_temp_dir(), 'ctl_imagenet_no_dist_strat')
extra_flags = [
'-distribution_strategy', 'off',
'-model_dir', model_dir,
'-data_format', 'channels_last',
]
extra_flags = extra_flags + self._extra_flags
integration.run_synthetic(
main=resnet_ctl_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_2_gpu(self):
"""Test Keras model with 2 GPUs."""
num_gpus = '2'
if context.num_gpus() < 2:
num_gpus = '0'
model_dir = os.path.join(self.get_temp_dir(), 'ctl_imagenet_2_gpu')
extra_flags = [
'-num_gpus', num_gpus,
'-distribution_strategy', 'mirrored',
'-model_dir', model_dir,
'-data_format', 'channels_last',
]
extra_flags = extra_flags + self._extra_flags
integration.run_synthetic(
main=resnet_ctl_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
if __name__ == '__main__':
assert tf.version.VERSION.startswith('2.')
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test the keras ResNet model with ImageNet data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.eager import context
from official.utils.misc import keras_utils
from official.utils.testing import integration
from official.vision.image_classification import imagenet_preprocessing
from official.vision.image_classification import resnet_imagenet_main
@parameterized.parameters(
"resnet",
"resnet_polynomial_decay",
"mobilenet",
"mobilenet_polynomial_decay")
class KerasImagenetTest(tf.test.TestCase):
"""Unit tests for Keras Models with ImageNet."""
_default_flags_dict = [
"-batch_size", "4",
"-train_steps", "1",
"-use_synthetic_data", "true",
"-data_format", "channels_last",
]
_extra_flags_dict = {
"resnet": [
"-model", "resnet50_v1.5",
"-optimizer", "resnet50_default",
],
"resnet_polynomial_decay": [
"-model", "resnet50_v1.5",
"-optimizer", "resnet50_default",
"-pruning_method", "polynomial_decay",
],
"mobilenet": [
"-model", "mobilenet",
"-optimizer", "mobilenet_default",
],
"mobilenet_polynomial_decay": [
"-model", "mobilenet",
"-optimizer", "mobilenet_default",
"-pruning_method", "polynomial_decay",
],
}
_tempdir = None
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
super(KerasImagenetTest, cls).setUpClass()
resnet_imagenet_main.define_imagenet_keras_flags()
def setUp(self):
super(KerasImagenetTest, self).setUp()
imagenet_preprocessing.NUM_IMAGES["validation"] = 4
self.policy = \
tf.compat.v2.keras.mixed_precision.experimental.global_policy()
def tearDown(self):
super(KerasImagenetTest, self).tearDown()
tf.io.gfile.rmtree(self.get_temp_dir())
tf.compat.v2.keras.mixed_precision.experimental.set_policy(self.policy)
def get_extra_flags_dict(self, flags_key):
return self._extra_flags_dict[flags_key] + self._default_flags_dict
def test_end_to_end_no_dist_strat(self, flags_key):
"""Test Keras model with 1 GPU, no distribution strategy."""
config = keras_utils.get_config_proto_v1()
tf.compat.v1.enable_eager_execution(config=config)
extra_flags = [
"-distribution_strategy", "off",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_graph_no_dist_strat(self, flags_key):
"""Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
extra_flags = [
"-enable_eager", "false",
"-distribution_strategy", "off",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_1_gpu(self, flags_key):
"""Test Keras model with 1 GPU."""
config = keras_utils.get_config_proto_v1()
tf.compat.v1.enable_eager_execution(config=config)
if context.num_gpus() < 1:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(1, context.num_gpus()))
extra_flags = [
"-num_gpus", "1",
"-distribution_strategy", "mirrored",
"-enable_checkpoint_and_export", "1",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_1_gpu_fp16(self, flags_key):
"""Test Keras model with 1 GPU and fp16."""
config = keras_utils.get_config_proto_v1()
tf.compat.v1.enable_eager_execution(config=config)
if context.num_gpus() < 1:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available"
.format(1, context.num_gpus()))
extra_flags = [
"-num_gpus", "1",
"-dtype", "fp16",
"-distribution_strategy", "mirrored",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
if "polynomial_decay" in extra_flags:
self.skipTest("Pruning with fp16 is not currently supported.")
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_2_gpu(self, flags_key):
"""Test Keras model with 2 GPUs."""
config = keras_utils.get_config_proto_v1()
tf.compat.v1.enable_eager_execution(config=config)
if context.num_gpus() < 2:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(2, context.num_gpus()))
extra_flags = [
"-num_gpus", "2",
"-distribution_strategy", "mirrored",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_xla_2_gpu(self, flags_key):
"""Test Keras model with XLA and 2 GPUs."""
config = keras_utils.get_config_proto_v1()
tf.compat.v1.enable_eager_execution(config=config)
if context.num_gpus() < 2:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(2, context.num_gpus()))
extra_flags = [
"-num_gpus", "2",
"-enable_xla", "true",
"-distribution_strategy", "mirrored",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_2_gpu_fp16(self, flags_key):
"""Test Keras model with 2 GPUs and fp16."""
config = keras_utils.get_config_proto_v1()
tf.compat.v1.enable_eager_execution(config=config)
if context.num_gpus() < 2:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(2, context.num_gpus()))
extra_flags = [
"-num_gpus", "2",
"-dtype", "fp16",
"-distribution_strategy", "mirrored",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
if "polynomial_decay" in extra_flags:
self.skipTest("Pruning with fp16 is not currently supported.")
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
def test_end_to_end_xla_2_gpu_fp16(self, flags_key):
"""Test Keras model with XLA, 2 GPUs and fp16."""
config = keras_utils.get_config_proto_v1()
tf.compat.v1.enable_eager_execution(config=config)
if context.num_gpus() < 2:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(2, context.num_gpus()))
extra_flags = [
"-num_gpus", "2",
"-dtype", "fp16",
"-enable_xla", "true",
"-distribution_strategy", "mirrored",
]
extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)
if "polynomial_decay" in extra_flags:
self.skipTest("Pruning with fp16 is not currently supported.")
integration.run_synthetic(
main=resnet_imagenet_main.run,
tmp_root=self.get_temp_dir(),
extra_flags=extra_flags
)
if __name__ == "__main__":
tf.compat.v1.enable_v2_behavior()
tf.test.main()