Commit b2c9e3f5 authored by Goldie Gadde, committed by Toby Boyd

Revert "Revert "tf_upgrade_v2 on resnet and utils folders. (#6154)" (#6162)" (#6167)

This reverts commit 57e07520.
parent 57e07520
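Note: the diff below is the output of TensorFlow's tf_upgrade_v2 conversion script re-applied to the resnet and utils folders. TF 1.x-only endpoints are rewritten either to their new tf.* locations or to the tf.compat.v1 shim, so the code keeps working on recent 1.x releases and on 2.x. A minimal sketch of the most common substitution in this change, assuming a TensorFlow release that ships tf.compat.v1:

import tensorflow as tf

# 1.x spelling (removed from the top-level namespace in 2.x):
#   tf.logging.set_verbosity(tf.logging.INFO)
# Spelling emitted by tf_upgrade_v2, valid on recent 1.x and on 2.x:
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)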
@@ -60,4 +60,4 @@ def main(_):
 if __name__ == '__main__':
   FLAGS, unparsed = parser.parse_known_args()
-  tf.app.run(argv=[sys.argv[0]] + unparsed)
+  tf.compat.v1.app.run(argv=[sys.argv[0]] + unparsed)
@@ -52,7 +52,7 @@ DATASET_NAME = 'CIFAR-10'
 ###############################################################################
 def get_filenames(is_training, data_dir):
   """Returns a list of filenames."""
-  assert tf.gfile.Exists(data_dir), (
+  assert tf.io.gfile.exists(data_dir), (
       'Run cifar10_download_and_extract.py first to download and extract the '
       'CIFAR-10 data.')
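Note: filesystem calls throughout this change move from the CamelCase tf.gfile API to tf.io.gfile. A short sketch of the correspondences used below, assuming a TensorFlow version that exposes tf.io.gfile; the path is hypothetical:

import tensorflow as tf

data_dir = '/tmp/cifar10_data'        # hypothetical path, for illustration only
if not tf.io.gfile.exists(data_dir):  # was tf.gfile.Exists
  tf.io.gfile.makedirs(data_dir)      # was tf.gfile.MakeDirs
# tf.io.gfile.remove and tf.io.gfile.rmtree replace
# tf.gfile.Remove and tf.gfile.DeleteRecursively.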
@@ -68,7 +68,7 @@ def get_filenames(is_training, data_dir):
 def parse_record(raw_record, is_training, dtype):
   """Parse CIFAR-10 image and label from a raw record."""
   # Convert bytes to a vector of uint8 that is record_bytes long.
-  record_vector = tf.decode_raw(raw_record, tf.uint8)
+  record_vector = tf.io.decode_raw(raw_record, tf.uint8)
   # The first byte represents the label, which we convert from uint8 to int32
   # and then to one-hot.
@@ -81,7 +81,7 @@ def parse_record(raw_record, is_training, dtype):
   # Convert from [depth, height, width] to [height, width, depth], and cast as
   # float32.
-  image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
+  image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
   image = preprocess_image(image, is_training)
   image = tf.cast(image, dtype)
@@ -97,7 +97,7 @@ def preprocess_image(image, is_training):
         image, HEIGHT + 8, WIDTH + 8)
     # Randomly crop a [HEIGHT, WIDTH] section of the image.
-    image = tf.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])
+    image = tf.image.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])
     # Randomly flip the image horizontally.
     image = tf.image.random_flip_left_right(image)
@@ -253,8 +253,9 @@ def run_cifar(flags_obj):
     Dictionary of results. Including final accuracy.
   """
   if flags_obj.image_bytes_as_serving_input:
-    tf.logging.fatal('--image_bytes_as_serving_input cannot be set to True '
-                     'for CIFAR. This flag is only applicable to ImageNet.')
+    tf.compat.v1.logging.fatal(
+        '--image_bytes_as_serving_input cannot be set to True for CIFAR. '
+        'This flag is only applicable to ImageNet.')
     return
   input_function = (flags_obj.use_synthetic_data and
@@ -273,6 +274,6 @@ def main(_):
 if __name__ == '__main__':
-  tf.logging.set_verbosity(tf.logging.INFO)
+  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
   define_cifar_flags()
   absl_app.run(main)
@@ -25,7 +25,7 @@ import tensorflow as tf  # pylint: disable=g-bad-import-order
 from official.resnet import cifar10_main
 from official.utils.testing import integration
-tf.logging.set_verbosity(tf.logging.ERROR)
+tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
 _BATCH_SIZE = 128
 _HEIGHT = 32
@@ -44,7 +44,7 @@ class BaseTest(tf.test.TestCase):
   def tearDown(self):
     super(BaseTest, self).tearDown()
-    tf.gfile.DeleteRecursively(self.get_temp_dir())
+    tf.io.gfile.rmtree(self.get_temp_dir())
   def test_dataset_input_fn(self):
     fake_data = bytearray()
@@ -62,7 +62,8 @@ class BaseTest(tf.test.TestCase):
         filename, cifar10_main._RECORD_BYTES)  # pylint: disable=protected-access
     fake_dataset = fake_dataset.map(
         lambda val: cifar10_main.parse_record(val, False, tf.float32))
-    image, label = fake_dataset.make_one_shot_iterator().get_next()
+    image, label = tf.compat.v1.data.make_one_shot_iterator(
+        fake_dataset).get_next()
     self.assertAllEqual(label.shape, ())
     self.assertAllEqual(image.shape, (_HEIGHT, _WIDTH, _NUM_CHANNELS))
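Note: in 2.x, Dataset objects no longer carry the make_one_shot_iterator() / make_initializable_iterator() methods, so the upgraded tests call the module-level tf.compat.v1.data helpers instead. A minimal sketch, assuming graph (non-eager) execution and a release where these helpers exist:

import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).batch(2)
# 1.x only: iterator = dataset.make_one_shot_iterator()
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_batch = iterator.get_next()
with tf.compat.v1.Session() as sess:
  print(sess.run(next_batch))  # [1 2]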
@@ -79,7 +80,7 @@ class BaseTest(tf.test.TestCase):
   def cifar10_model_fn_helper(self, mode, resnet_version, dtype):
     input_fn = cifar10_main.get_synth_input_fn(dtype)
     dataset = input_fn(True, '', _BATCH_SIZE)
-    iterator = dataset.make_initializable_iterator()
+    iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
     features, labels = iterator.get_next()
     spec = cifar10_main.cifar10_model_fn(
         features, labels, mode, {
@@ -142,7 +143,7 @@ class BaseTest(tf.test.TestCase):
     model = cifar10_main.Cifar10Model(32, data_format='channels_last',
                                       num_classes=num_classes,
                                       resnet_version=resnet_version)
-    fake_input = tf.random_uniform([batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
+    fake_input = tf.random.uniform([batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
     output = model(fake_input, training=True)
     self.assertAllEqual(output.shape, (batch_size, num_classes))
@@ -144,7 +144,7 @@ class EstimatorCifar10BenchmarkTests(tf.test.Benchmark):
     return os.path.join(self.output_dir, folder_name)
   def _setup(self):
-    tf.logging.set_verbosity(tf.logging.DEBUG)
+    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
     if EstimatorCifar10BenchmarkTests.local_flags is None:
       cifar_main.define_cifar_flags()
       # Loads flags to get defaults to then override.
@@ -95,14 +95,14 @@ def _parse_example_proto(example_serialized):
   """
   # Dense features in Example proto.
   feature_map = {
-      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
-                                          default_value=''),
-      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64,
-                                              default_value=-1),
-      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
-                                             default_value=''),
+      'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
+                                             default_value=''),
+      'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64,
+                                                 default_value=-1),
+      'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,
+                                                default_value=''),
   }
-  sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
+  sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
   # Sparse features in Example proto.
   feature_map.update(
       {k: sparse_float32 for k in ['image/object/bbox/xmin',
@@ -110,7 +110,8 @@ def _parse_example_proto(example_serialized):
                                    'image/object/bbox/xmax',
                                    'image/object/bbox/ymax']})
-  features = tf.parse_single_example(example_serialized, feature_map)
+  features = tf.io.parse_single_example(serialized=example_serialized,
+                                        features=feature_map)
   label = tf.cast(features['image/class/label'], dtype=tf.int32)
   xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
@@ -124,7 +125,7 @@ def _parse_example_proto(example_serialized):
   # Force the variable number of bounding boxes into the shape
   # [1, num_boxes, coords].
   bbox = tf.expand_dims(bbox, 0)
-  bbox = tf.transpose(bbox, [0, 2, 1])
+  bbox = tf.transpose(a=bbox, perm=[0, 2, 1])
   return features['image/encoded'], label, bbox
@@ -188,7 +189,7 @@ def input_fn(is_training, data_dir, batch_size, num_epochs=1,
   # This number is low enough to not cause too much contention on small systems
   # but high enough to provide the benefits of parallelization. You may want
   # to increase this number if you have a large number of CPU cores.
-  dataset = dataset.apply(tf.contrib.data.parallel_interleave(
+  dataset = dataset.apply(tf.data.experimental.parallel_interleave(
       tf.data.TFRecordDataset, cycle_length=10))
   return resnet_run_loop.process_record_dataset(
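Note: tf.contrib does not exist in 2.x, so the contrib tf.data helpers used by these input pipelines (parallel_interleave, map_and_batch, AUTOTUNE) move under tf.data.experimental. A minimal sketch of the rewritten pattern; the file glob is hypothetical:

import tensorflow as tf

files = tf.data.Dataset.list_files('/tmp/imagenet/train-*')  # hypothetical glob
dataset = files.apply(tf.data.experimental.parallel_interleave(
    tf.data.TFRecordDataset, cycle_length=10))
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)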
@@ -352,6 +353,6 @@ def main(_):
 if __name__ == '__main__':
-  tf.logging.set_verbosity(tf.logging.INFO)
+  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
   define_imagenet_flags()
   absl_app.run(main)
@@ -108,7 +108,7 @@ def _central_crop(image, crop_height, crop_width):
   Returns:
     3-D tensor with cropped image.
   """
-  shape = tf.shape(image)
+  shape = tf.shape(input=image)
   height, width = shape[0], shape[1]
   amount_to_be_cropped_h = (height - crop_height)
@@ -195,7 +195,7 @@ def _aspect_preserving_resize(image, resize_min):
   Returns:
     resized_image: A 3-D tensor containing the resized image.
   """
-  shape = tf.shape(image)
+  shape = tf.shape(input=image)
   height, width = shape[0], shape[1]
   new_height, new_width = _smallest_size_at_least(height, width, resize_min)
@@ -218,7 +218,7 @@ def _resize_image(image, height, width):
     resized_image: A 3-D tensor containing the resized image. The first two
       dimensions have the shape [height, width].
   """
-  return tf.image.resize_images(
+  return tf.image.resize(
       image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
       align_corners=False)
@@ -24,7 +24,7 @@ import tensorflow as tf  # pylint: disable=g-bad-import-order
 from official.resnet import imagenet_main
 from official.utils.testing import integration
-tf.logging.set_verbosity(tf.logging.ERROR)
+tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
 _BATCH_SIZE = 32
 _LABEL_CLASSES = 1001
@@ -39,7 +39,7 @@ class BaseTest(tf.test.TestCase):
   def tearDown(self):
     super(BaseTest, self).tearDown()
-    tf.gfile.DeleteRecursively(self.get_temp_dir())
+    tf.io.gfile.rmtree(self.get_temp_dir())
   def _tensor_shapes_helper(self, resnet_size, resnet_version, dtype, with_gpu):
     """Checks the tensor shapes after each phase of the ResNet model."""
@@ -62,7 +62,7 @@ class BaseTest(tf.test.TestCase):
         resnet_version=resnet_version,
         dtype=dtype
     )
-    inputs = tf.random_uniform([1, 224, 224, 3])
+    inputs = tf.random.uniform([1, 224, 224, 3])
     output = model(inputs, training=True)
     initial_conv = graph.get_tensor_by_name('resnet_model/initial_conv:0')
@@ -189,11 +189,11 @@ class BaseTest(tf.test.TestCase):
   def resnet_model_fn_helper(self, mode, resnet_version, dtype):
     """Tests that the EstimatorSpec is given the appropriate arguments."""
-    tf.train.create_global_step()
+    tf.compat.v1.train.create_global_step()
     input_fn = imagenet_main.get_synth_input_fn(dtype)
     dataset = input_fn(True, '', _BATCH_SIZE)
-    iterator = dataset.make_initializable_iterator()
+    iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
     features, labels = iterator.get_next()
     spec = imagenet_main.imagenet_model_fn(
         features, labels, mode, {
@@ -257,7 +257,7 @@ class BaseTest(tf.test.TestCase):
         50, data_format='channels_last', num_classes=num_classes,
         resnet_version=resnet_version)
-    fake_input = tf.random_uniform([batch_size, 224, 224, 3])
+    fake_input = tf.random.uniform([batch_size, 224, 224, 3])
     output = model(fake_input, training=True)
     self.assertAllEqual(output.shape, (batch_size, num_classes))
@@ -43,7 +43,7 @@ class KerasBenchmark(tf.test.Benchmark):
   def _setup(self):
     """Sets up and resets flags before each test."""
-    tf.logging.set_verbosity(tf.logging.DEBUG)
+    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
     if KerasBenchmark.local_flags is None:
       for flag_method in self.flag_methods:
         flag_method()
@@ -81,7 +81,7 @@ def parse_record_keras(raw_record, is_training, dtype):
     Tuple with processed image tensor and one-hot-encoded label tensor.
   """
   image, label = cifar_main.parse_record(raw_record, is_training, dtype)
-  label = tf.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
+  label = tf.compat.v1.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
   return image, label
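Note: tf.sparse_to_dense has no top-level 2.x endpoint, so the script parks it under tf.compat.v1. For a scalar label the call just builds a one-hot vector, so a hand-written modern equivalent (not what tf_upgrade_v2 emits; the dtype must be pinned to match) could look like this:

import tensorflow as tf

num_classes = 10
label = tf.constant(3)
one_hot_v1 = tf.compat.v1.sparse_to_dense(label, (num_classes,), 1)
one_hot_v2 = tf.one_hot(label, num_classes, dtype=tf.int32)
# Both produce [0 0 0 1 0 0 0 0 0 0].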
@@ -98,7 +98,7 @@ def run(flags_obj):
     Dictionary of training and eval stats.
   """
   if flags_obj.enable_eager:
-    tf.enable_eager_execution()
+    tf.compat.v1.enable_eager_execution()
   dtype = flags_core.get_tf_dtype(flags_obj)
   if dtype == 'fp16':
@@ -194,7 +194,7 @@ def main(_):
 if __name__ == '__main__':
-  tf.logging.set_verbosity(tf.logging.INFO)
+  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
   cifar_main.define_cifar_flags()
   keras_common.define_keras_flags()
   absl_app.run(main)
@@ -80,9 +80,10 @@ class TimeHistory(tf.keras.callbacks.Callback):
       if batch != 0:
         self.record_batch = True
         self.timestamp_log.append(BatchTimestamp(batch, timestamp))
-        tf.logging.info("BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
-                        "'images_per_second': %f}" %
-                        (batch, elapsed_time, examples_per_second))
+        tf.compat.v1.logging.info(
+            "BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
+            "'images_per_second': %f}" %
+            (batch, elapsed_time, examples_per_second))
 class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
@@ -120,8 +121,9 @@ class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
       if lr != self.prev_lr:
         self.model.optimizer.learning_rate = lr  # lr should be a float here
         self.prev_lr = lr
-        tf.logging.debug('Epoch %05d Batch %05d: LearningRateBatchScheduler '
-                         'change learning rate to %s.', self.epochs, batch, lr)
+        tf.compat.v1.logging.debug(
+            'Epoch %05d Batch %05d: LearningRateBatchScheduler '
+            'change learning rate to %s.', self.epochs, batch, lr)
 def get_optimizer():
@@ -226,22 +228,20 @@ def get_synth_input_fn(height, width, num_channels, num_classes,
   def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
     """Returns dataset filled with random data."""
     # Synthetic input should be within [0, 255].
-    inputs = tf.truncated_normal(
-        [height, width, num_channels],
-        dtype=dtype,
-        mean=127,
-        stddev=60,
-        name='synthetic_inputs')
-    labels = tf.random_uniform(
-        [1],
-        minval=0,
-        maxval=num_classes - 1,
-        dtype=tf.int32,
-        name='synthetic_labels')
+    inputs = tf.random.truncated_normal([height, width, num_channels],
+                                        dtype=dtype,
+                                        mean=127,
+                                        stddev=60,
+                                        name='synthetic_inputs')
+    labels = tf.random.uniform([1],
+                               minval=0,
+                               maxval=num_classes - 1,
+                               dtype=tf.int32,
+                               name='synthetic_labels')
     data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
     data = data.batch(batch_size)
-    data = data.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
+    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
     return data
   return input_fn
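Note: the random-op renames (tf.truncated_normal to tf.random.truncated_normal, tf.random_uniform to tf.random.uniform) are purely mechanical. A standalone sketch of the same synthetic-data pattern with the 2.x endpoints, using made-up shapes and assuming a release that exposes the tf.random namespace:

import tensorflow as tf

inputs = tf.random.truncated_normal([32, 32, 3], mean=127., stddev=60.,
                                    name='synthetic_inputs')
labels = tf.random.uniform([1], minval=0, maxval=9, dtype=tf.int32,
                           name='synthetic_labels')
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat().batch(8)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)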
@@ -22,7 +22,7 @@ import tensorflow as tf  # pylint: disable=g-bad-import-order
 from official.resnet.keras import keras_common
-tf.logging.set_verbosity(tf.logging.ERROR)
+tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
 class KerasCommonTests(tf.test.TestCase):
@@ -88,7 +88,7 @@ def run(flags_obj):
     ValueError: If fp16 is passed as it is not currently supported.
   """
   if flags_obj.enable_eager:
-    tf.enable_eager_execution()
+    tf.compat.v1.enable_eager_execution()
   dtype = flags_core.get_tf_dtype(flags_obj)
   if dtype == 'fp16':
@@ -187,7 +187,7 @@ def main(_):
 if __name__ == '__main__':
-  tf.logging.set_verbosity(tf.logging.INFO)
+  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
   imagenet_main.define_imagenet_flags()
   keras_common.define_keras_flags()
   absl_app.run(main)
@@ -72,10 +72,10 @@ class BaseTest(reference_data.BaseTest):
     g = tf.Graph()
     with g.as_default():
-      tf.set_random_seed(self.name_to_seed(name))
-      input_tensor = tf.get_variable(
+      tf.compat.v1.set_random_seed(self.name_to_seed(name))
+      input_tensor = tf.compat.v1.get_variable(
           "input_tensor", dtype=tf.float32,
-          initializer=tf.random_uniform((32, 16, 16, 3), maxval=1)
+          initializer=tf.random.uniform((32, 16, 16, 3), maxval=1)
       )
       layer = resnet_model.batch_norm(
           inputs=input_tensor, data_format=DATA_FORMAT, training=True)
@@ -137,7 +137,7 @@ class BaseTest(reference_data.BaseTest):
     g = tf.Graph()
     with g.as_default():
-      tf.set_random_seed(self.name_to_seed(name))
+      tf.compat.v1.set_random_seed(self.name_to_seed(name))
       strides = 1
       channels_out = channels
       projection_shortcut = None
@@ -151,9 +151,9 @@ class BaseTest(reference_data.BaseTest):
       if bottleneck:
         filters = channels_out // 4
-      input_tensor = tf.get_variable(
+      input_tensor = tf.compat.v1.get_variable(
           "input_tensor", dtype=tf.float32,
-          initializer=tf.random_uniform((batch_size, width, width, channels),
+          initializer=tf.random.uniform((batch_size, width, width, channels),
                                         maxval=1)
       )
@@ -48,7 +48,7 @@ def batch_norm(inputs, training, data_format):
   """Performs a batch normalization using a standard set of parameters."""
   # We set fused=True for a significant performance boost. See
   # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
-  return tf.layers.batch_normalization(
+  return tf.compat.v1.layers.batch_normalization(
       inputs=inputs, axis=1 if data_format == 'channels_first' else 3,
       momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
       scale=True, training=training, fused=True)
@@ -73,11 +73,13 @@ def fixed_padding(inputs, kernel_size, data_format):
   pad_end = pad_total - pad_beg
   if data_format == 'channels_first':
-    padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
-                                    [pad_beg, pad_end], [pad_beg, pad_end]])
+    padded_inputs = tf.pad(tensor=inputs,
+                           paddings=[[0, 0], [0, 0], [pad_beg, pad_end],
+                                     [pad_beg, pad_end]])
   else:
-    padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
-                                    [pad_beg, pad_end], [0, 0]])
+    padded_inputs = tf.pad(tensor=inputs,
+                           paddings=[[0, 0], [pad_beg, pad_end],
+                                     [pad_beg, pad_end], [0, 0]])
   return padded_inputs
@@ -88,10 +90,10 @@ def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
   if strides > 1:
     inputs = fixed_padding(inputs, kernel_size, data_format)
-  return tf.layers.conv2d(
+  return tf.compat.v1.layers.conv2d(
       inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
       padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
-      kernel_initializer=tf.variance_scaling_initializer(),
+      kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
       data_format=data_format)
@@ -475,8 +477,8 @@ class Model(object):
       A variable scope for the model.
     """
-    return tf.variable_scope('resnet_model',
-                             custom_getter=self._custom_dtype_getter)
+    return tf.compat.v1.variable_scope('resnet_model',
+                                       custom_getter=self._custom_dtype_getter)
   def __call__(self, inputs, training):
     """Add operations to classify a batch of input images.
@@ -495,7 +497,7 @@ class Model(object):
       # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
       # This provides a large performance boost on GPU. See
       # https://www.tensorflow.org/performance/performance_guide#data_formats
-      inputs = tf.transpose(inputs, [0, 3, 1, 2])
+      inputs = tf.transpose(a=inputs, perm=[0, 3, 1, 2])
     inputs = conv2d_fixed_padding(
         inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
@@ -511,7 +513,7 @@ class Model(object):
     inputs = tf.nn.relu(inputs)
     if self.first_pool_size:
-      inputs = tf.layers.max_pooling2d(
+      inputs = tf.compat.v1.layers.max_pooling2d(
          inputs=inputs, pool_size=self.first_pool_size,
          strides=self.first_pool_stride, padding='SAME',
          data_format=self.data_format)
@@ -537,10 +539,10 @@ class Model(object):
     # but that is the same as doing a reduce_mean. We do a reduce_mean
     # here because it performs better than AveragePooling2D.
     axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
-    inputs = tf.reduce_mean(inputs, axes, keepdims=True)
+    inputs = tf.reduce_mean(input_tensor=inputs, axis=axes, keepdims=True)
     inputs = tf.identity(inputs, 'final_reduce_mean')
     inputs = tf.squeeze(inputs, axes)
-    inputs = tf.layers.dense(inputs=inputs, units=self.num_classes)
+    inputs = tf.compat.v1.layers.dense(inputs=inputs, units=self.num_classes)
    inputs = tf.identity(inputs, 'final_dense')
    return inputs
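Note: many of the remaining edits only add explicit keyword arguments (tf.transpose(a=..., perm=...), tf.shape(input=...), tf.reduce_mean(input_tensor=..., axis=...), tf.argmax(input=...)); the upgrade script pins arguments by name wherever the 2.x signature renames or reorders positional parameters. A small sketch of the same spellings:

import tensorflow as tf

x = tf.random.uniform([2, 3, 4])
y = tf.transpose(a=x, perm=[0, 2, 1])        # was tf.transpose(x, [0, 2, 1])
m = tf.reduce_mean(input_tensor=y, axis=[1],  # was tf.reduce_mean(y, [1], ...)
                   keepdims=True)
s = tf.shape(input=m)                         # was tf.shape(m)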
@@ -88,7 +88,7 @@ def process_record_dataset(dataset,
   # Parses the raw records into images and labels.
   dataset = dataset.apply(
-      tf.contrib.data.map_and_batch(
+      tf.data.experimental.map_and_batch(
          lambda value: parse_record_fn(value, is_training, dtype),
          batch_size=batch_size,
          num_parallel_batches=num_parallel_batches,
@@ -100,12 +100,12 @@ def process_record_dataset(dataset,
   # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
   # allows DistributionStrategies to adjust how many batches to fetch based
   # on how many devices are present.
-  dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
+  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
   # Defines a specific size thread pool for tf.data operations.
   if datasets_num_private_threads:
-    tf.logging.info('datasets_num_private_threads: %s',
-                    datasets_num_private_threads)
+    tf.compat.v1.logging.info('datasets_num_private_threads: %s',
+                              datasets_num_private_threads)
     dataset = threadpool.override_threadpool(
         dataset,
         threadpool.PrivateThreadPool(
@@ -140,21 +140,21 @@ def get_synth_input_fn(height, width, num_channels, num_classes,
   def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
     """Returns dataset filled with random data."""
     # Synthetic input should be within [0, 255].
-    inputs = tf.truncated_normal(
+    inputs = tf.random.truncated_normal(
        [batch_size] + [height, width, num_channels],
        dtype=dtype,
        mean=127,
        stddev=60,
        name='synthetic_inputs')
-    labels = tf.random_uniform(
+    labels = tf.random.uniform(
        [batch_size],
        minval=0,
        maxval=num_classes - 1,
        dtype=tf.int32,
        name='synthetic_labels')
     data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
-    data = data.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
+    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
     return data
   return input_fn
@@ -172,7 +172,7 @@ def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
         image_bytes, bbox, height, width, num_channels, is_training=False)
     return image
-  image_bytes_list = tf.placeholder(
+  image_bytes_list = tf.compat.v1.placeholder(
      shape=[None], dtype=tf.string, name='input_tensor')
   images = tf.map_fn(
      _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
@@ -197,15 +197,17 @@ def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
     what has been set by the user on the command-line.
   """
   cpu_count = multiprocessing.cpu_count()
-  tf.logging.info('Logical CPU cores: %s', cpu_count)
+  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)
   # Sets up thread pool for each GPU for op scheduling.
   per_gpu_thread_count = 1
   total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
   os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
   os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
-  tf.logging.info('TF_GPU_THREAD_COUNT: %s', os.environ['TF_GPU_THREAD_COUNT'])
-  tf.logging.info('TF_GPU_THREAD_MODE: %s', os.environ['TF_GPU_THREAD_MODE'])
+  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
+                            os.environ['TF_GPU_THREAD_COUNT'])
+  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
+                            os.environ['TF_GPU_THREAD_MODE'])
   # Reduces general thread pool by number of threads used for GPU pool.
   main_thread_count = cpu_count - total_gpu_thread_count
@@ -256,13 +258,15 @@ def learning_rate_with_decay(
   def learning_rate_fn(global_step):
     """Builds scaled learning rate function with 5 epoch warm up."""
-    lr = tf.train.piecewise_constant(global_step, boundaries, vals)
+    lr = tf.compat.v1.train.piecewise_constant(global_step, boundaries, vals)
     if warmup:
       warmup_steps = int(batches_per_epoch * 5)
       warmup_lr = (
           initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
              warmup_steps, tf.float32))
-      return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
+      return tf.cond(pred=global_step < warmup_steps,
+                     true_fn=lambda: warmup_lr,
+                     false_fn=lambda: lr)
     return lr
   return learning_rate_fn
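Note: the warm-up branch above scales the learning rate linearly with the global step before handing off to the piecewise-constant schedule; for example, with initial_learning_rate = 0.1 and warmup_steps = 500, step 100 yields 0.1 * 100 / 500 = 0.02. A standalone sketch of the same logic in plain Python (names mirror the surrounding function):

def warmup_then_piecewise(global_step, initial_learning_rate, warmup_steps, lr):
  # Linear warm-up for the first warmup_steps, then the decayed rate.
  if global_step < warmup_steps:
    return initial_learning_rate * float(global_step) / float(warmup_steps)
  return lr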
@@ -313,7 +317,7 @@ def resnet_model_fn(features, labels, mode, model_class,
   """
   # Generate a summary node for the images
-  tf.summary.image('images', features, max_outputs=6)
+  tf.compat.v1.summary.image('images', features, max_outputs=6)
   # Checks that features/images have same data type being used for calculations.
   assert features.dtype == dtype
@@ -328,7 +332,7 @@ def resnet_model_fn(features, labels, mode, model_class,
   logits = tf.cast(logits, tf.float32)
   predictions = {
-      'classes': tf.argmax(logits, axis=1),
+      'classes': tf.argmax(input=logits, axis=1),
       'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
   }
@@ -342,12 +346,12 @@ def resnet_model_fn(features, labels, mode, model_class,
         })
   # Calculate loss, which includes softmax cross entropy and L2 regularization.
-  cross_entropy = tf.losses.sparse_softmax_cross_entropy(
+  cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
      logits=logits, labels=labels)
   # Create a tensor named cross_entropy for logging purposes.
   tf.identity(cross_entropy, name='cross_entropy')
-  tf.summary.scalar('cross_entropy', cross_entropy)
+  tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)
   # If no loss_filter_fn is passed, assume we want the default behavior,
   # which is that batch_normalization variables are excluded from loss.
@@ -358,21 +362,21 @@ def resnet_model_fn(features, labels, mode, model_class,
   # Add weight decay to the loss.
   l2_loss = weight_decay * tf.add_n(
       # loss is computed using fp32 for numerical stability.
-      [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
-       if loss_filter_fn(v.name)])
-  tf.summary.scalar('l2_loss', l2_loss)
+      [tf.nn.l2_loss(tf.cast(v, tf.float32))
+       for v in tf.compat.v1.trainable_variables() if loss_filter_fn(v.name)])
+  tf.compat.v1.summary.scalar('l2_loss', l2_loss)
   loss = cross_entropy + l2_loss
   if mode == tf.estimator.ModeKeys.TRAIN:
-    global_step = tf.train.get_or_create_global_step()
+    global_step = tf.compat.v1.train.get_or_create_global_step()
     learning_rate = learning_rate_fn(global_step)
     # Create a tensor named learning_rate for logging purposes
     tf.identity(learning_rate, name='learning_rate')
-    tf.summary.scalar('learning_rate', learning_rate)
+    tf.compat.v1.summary.scalar('learning_rate', learning_rate)
-    optimizer = tf.train.MomentumOptimizer(
+    optimizer = tf.compat.v1.train.MomentumOptimizer(
        learning_rate=learning_rate,
        momentum=momentum
     )
@@ -409,24 +413,22 @@ def resnet_model_fn(features, labels, mode, model_class,
       grad_vars = _dense_grad_filter(grad_vars)
     minimize_op = optimizer.apply_gradients(grad_vars, global_step)
-    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
     train_op = tf.group(minimize_op, update_ops)
   else:
     train_op = None
-  accuracy = tf.metrics.accuracy(labels, predictions['classes'])
-  accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits,
-                                                  targets=labels,
-                                                  k=5,
-                                                  name='top_5_op'))
+  accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])
+  accuracy_top_5 = tf.compat.v1.metrics.mean(
+      tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))
   metrics = {'accuracy': accuracy,
              'accuracy_top_5': accuracy_top_5}
   # Create a tensor named train_accuracy for logging purposes
   tf.identity(accuracy[1], name='train_accuracy')
   tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
-  tf.summary.scalar('train_accuracy', accuracy[1])
-  tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
+  tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
+  tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
   return tf.estimator.EstimatorSpec(
       mode=mode,
@@ -465,7 +467,7 @@ def resnet_main(
   # Creates session config. allow_soft_placement = True, is required for
   # multi-GPU and is not harmful for other modes.
-  session_config = tf.ConfigProto(
+  session_config = tf.compat.v1.ConfigProto(
      inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
      intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
      allow_soft_placement=True)
@@ -557,13 +559,14 @@ def resnet_main(
   schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1])  # over counting.
   for cycle_index, num_train_epochs in enumerate(schedule):
-    tf.logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops))
+    tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
+                              int(n_loops))
     if num_train_epochs:
       classifier.train(input_fn=lambda: input_fn_train(num_train_epochs),
                        hooks=train_hooks, max_steps=flags_obj.max_train_steps)
-    tf.logging.info('Starting to evaluate.')
+    tf.compat.v1.logging.info('Starting to evaluate.')
     # flags_obj.max_train_steps is generally associated with testing and
     # profiling. As a result it is frequently called with synthetic data, which
@@ -71,7 +71,7 @@ def construct_scalar_host_call(metric_dict, model_dir, prefix=""):
     # expects [batch_size, ...] Tensors, thus reshape to introduce a batch
     # dimension. These Tensors are implicitly concatenated to
     # [params['batch_size']].
-    global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])
+    global_step_tensor = tf.reshape(tf.compat.v1.train.get_or_create_global_step(), [1])
     other_tensors = [tf.reshape(metric_dict[key], [1]) for key in metric_names]
     return host_call_fn, [global_step_tensor] + other_tensors
@@ -36,14 +36,14 @@ class TPUBaseTester(tf.test.TestCase):
     np.random.seed(seed)
     embeddings = np.random.random(size=(vocab_size, embedding_dim))
-    embedding_table = tf.convert_to_tensor(embeddings, dtype=tf.float32)
+    embedding_table = tf.convert_to_tensor(value=embeddings, dtype=tf.float32)
     tokens = np.random.randint(low=1, high=vocab_size-1,
                                size=(batch_size, sequence_length))
     for i in range(batch_size):
       tokens[i, np.random.randint(low=0, high=sequence_length-1):] = 0
-    values = tf.convert_to_tensor(tokens, dtype=tf.int32)
-    mask = tf.to_float(tf.not_equal(values, 0))
+    values = tf.convert_to_tensor(value=tokens, dtype=tf.int32)
+    mask = tf.cast(tf.not_equal(values, 0), dtype=tf.float32)
     return embedding_table, values, mask
   def _test_embedding(self, embedding_dim, vocab_size,
@@ -47,11 +47,11 @@ class _GarbageCollector(object):
   def purge(self):
     try:
       for i in self.temp_buffers:
-        if tf.gfile.Exists(i):
-          tf.gfile.Remove(i)
-          tf.logging.info("Buffer file {} removed".format(i))
+        if tf.io.gfile.exists(i):
+          tf.io.gfile.remove(i)
+          tf.compat.v1.logging.info("Buffer file {} removed".format(i))
     except Exception as e:
-      tf.logging.error("Failed to cleanup buffer files: {}".format(e))
+      tf.compat.v1.logging.error("Failed to cleanup buffer files: {}".format(e))
 _GARBAGE_COLLECTOR = _GarbageCollector()
@@ -64,7 +64,7 @@ def write_to_temp_buffer(dataframe, buffer_folder, columns):
   if buffer_folder is None:
     _, buffer_path = tempfile.mkstemp()
   else:
-    tf.gfile.MakeDirs(buffer_folder)
+    tf.io.gfile.makedirs(buffer_folder)
     buffer_path = os.path.join(buffer_folder, str(uuid.uuid4()))
   _GARBAGE_COLLECTOR.register(buffer_path)
@@ -169,35 +169,35 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
   Returns:
     The path of the buffer.
   """
-  if tf.gfile.Exists(buffer_path) and tf.gfile.Stat(buffer_path).length > 0:
-    actual_size = tf.gfile.Stat(buffer_path).length
+  if tf.io.gfile.exists(buffer_path) and tf.io.gfile.stat(buffer_path).length > 0:
+    actual_size = tf.io.gfile.stat(buffer_path).length
     if expected_size == actual_size:
       return buffer_path
-    tf.logging.warning(
+    tf.compat.v1.logging.warning(
        "Existing buffer {} has size {}. Expected size {}. Deleting and "
        "rebuilding buffer.".format(buffer_path, actual_size, expected_size))
-    tf.gfile.Remove(buffer_path)
+    tf.io.gfile.remove(buffer_path)
   if dataframe is None:
     raise ValueError(
        "dataframe was None but a valid existing buffer was not found.")
-  tf.gfile.MakeDirs(os.path.split(buffer_path)[0])
+  tf.io.gfile.makedirs(os.path.split(buffer_path)[0])
-  tf.logging.info("Constructing TFRecordDataset buffer: {}".format(buffer_path))
+  tf.compat.v1.logging.info("Constructing TFRecordDataset buffer: {}".format(buffer_path))
   count = 0
   pool = multiprocessing.Pool(multiprocessing.cpu_count())
   try:
-    with tf.python_io.TFRecordWriter(buffer_path) as writer:
+    with tf.io.TFRecordWriter(buffer_path) as writer:
      for df_shards in iter_shard_dataframe(df=dataframe,
                                            rows_per_core=_ROWS_PER_CORE):
        _serialize_shards(df_shards, columns, pool, writer)
        count += sum([len(s) for s in df_shards])
-        tf.logging.info("{}/{} examples written."
-                        .format(str(count).ljust(8), len(dataframe)))
+        tf.compat.v1.logging.info("{}/{} examples written."
+                                  .format(str(count).ljust(8), len(dataframe)))
   finally:
     pool.terminate()
-  tf.logging.info("Buffer write complete.")
+  tf.compat.v1.logging.info("Buffer write complete.")
   return buffer_path
@@ -77,9 +77,9 @@ _TEST_CASES = [
 ]
 _FEATURE_MAP = {
-    _RAW_ROW: tf.FixedLenFeature([1], dtype=tf.int64),
-    _DUMMY_COL: tf.FixedLenFeature([1], dtype=tf.int64),
-    _DUMMY_VEC_COL: tf.FixedLenFeature([_DUMMY_VEC_LEN], dtype=tf.float32)
+    _RAW_ROW: tf.io.FixedLenFeature([1], dtype=tf.int64),
+    _DUMMY_COL: tf.io.FixedLenFeature([1], dtype=tf.int64),
+    _DUMMY_VEC_COL: tf.io.FixedLenFeature([_DUMMY_VEC_LEN], dtype=tf.float32)
 }
@@ -156,9 +156,9 @@ class BaseTest(tf.test.TestCase):
     with self.test_session(graph=tf.Graph()) as sess:
       dataset = tf.data.TFRecordDataset(buffer_path)
      dataset = dataset.batch(1).map(
-          lambda x: tf.parse_example(x, _FEATURE_MAP))
+          lambda x: tf.io.parse_example(serialized=x, features=_FEATURE_MAP))
-      data_iter = dataset.make_one_shot_iterator()
+      data_iter = tf.compat.v1.data.make_one_shot_iterator(dataset)
      seen_rows = set()
      for i in range(num_rows+5):
        row = data_iter.get_next()
@@ -177,7 +177,7 @@ class BaseTest(tf.test.TestCase):
       self.assertGreaterEqual(i, num_rows, msg="Too few rows.")
     file_io._GARBAGE_COLLECTOR.purge()
-    assert not tf.gfile.Exists(buffer_path)
+    assert not tf.io.gfile.exists(buffer_path)
   def test_serialize_deserialize_0(self):
     self._serialize_deserialize(num_cores=1)
@@ -40,7 +40,7 @@ def build_tensor_serving_input_receiver_fn(shape, dtype=tf.float32,
   """
   def serving_input_receiver_fn():
     # Prep a placeholder where the input example will be fed in
-    features = tf.placeholder(
+    features = tf.compat.v1.placeholder(
        dtype=dtype, shape=[batch_size] + shape, name='input_tensor')
    return tf.estimator.export.TensorServingInputReceiver(
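Note: tf.placeholder survives only as tf.compat.v1.placeholder and requires graph mode, which is why the serving-input receivers keep the compat spelling. A hedged usage sketch of this helper: estimator is a hypothetical tf.estimator.Estimator built elsewhere, batch_size is assumed to be a parameter of the helper, and export_saved_model is assumed to be the export method on the installed release (older 1.x releases spell it export_savedmodel):

receiver_fn = build_tensor_serving_input_receiver_fn(
    shape=[224, 224, 3], dtype=tf.float32, batch_size=1)
estimator.export_saved_model('/tmp/exported_model', receiver_fn)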