Commit fc4ce16b authored by A. Unique TensorFlower

Switch away from contrib where possible in tensorflow_models/official

PiperOrigin-RevId: 288064272
parent 39f4e422
@@ -400,7 +400,7 @@ def _single_token_mask(inputs, tgt_len, num_predict):
   non_func_indices = tf.boolean_mask(all_indices, non_func_mask)
   masked_pos = tf.random.shuffle(non_func_indices)
-  masked_pos = tf.contrib.framework.sort(masked_pos[:num_predict])
+  masked_pos = tf.sort(masked_pos[:num_predict])
   target_mask = tf.sparse_to_dense(
       sparse_indices=masked_pos,
       output_shape=[tgt_len],
...
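For the sort change above, tf.contrib.framework.sort and tf.sort take the same tensor-in, tensor-out form, so the swap is a drop-in rename. A minimal sketch, assuming a TF release where tf.sort is available (the tensor values are illustrative, not from the real pipeline):

    import tensorflow as tf

    masked_pos = tf.constant([7, 2, 5])  # illustrative indices only
    # Old: tf.contrib.framework.sort(masked_pos); the core op behaves the same:
    masked_pos = tf.sort(masked_pos)  # ascending by default, matching the contrib default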
@@ -229,7 +229,8 @@ def train_boosted_trees(flags_obj):
   # Though BoostedTreesClassifier is under tf.estimator, faster in-memory
   # training is yet provided as a contrib library.
-  classifier = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(
+  from tensorflow.contrib import estimator as contrib_estimator  # pylint: disable=g-import-not-at-top
+  classifier = contrib_estimator.boosted_trees_classifier_train_in_memory(
       train_input_fn,
       feature_columns,
       model_dir=flags_obj.model_dir or None,
...
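boosted_trees_classifier_train_in_memory has no core replacement, so the change above only moves the contrib dependency from module scope into the function body. A hedged sketch of that pattern; the wrapper function and argument names here are illustrative, not from the repo:

    def train_model(train_input_fn, feature_columns, model_dir):
      # Import contrib lazily so modules that never call this function
      # do not pull in tensorflow.contrib at import time.
      from tensorflow.contrib import estimator as contrib_estimator  # pylint: disable=g-import-not-at-top
      return contrib_estimator.boosted_trees_classifier_train_in_memory(
          train_input_fn, feature_columns, model_dir=model_dir)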
@@ -58,10 +58,11 @@ def compute_accuracy(logits, labels):
 def train(model, optimizer, dataset, step_counter, log_interval=None):
   """Trains model on `dataset` using `optimizer`."""
+  from tensorflow.contrib import summary as contrib_summary  # pylint: disable=g-import-not-at-top
   start = time.time()
   for (batch, (images, labels)) in enumerate(dataset):
-    with tf.contrib.summary.record_summaries_every_n_global_steps(
+    with contrib_summary.record_summaries_every_n_global_steps(
         10, global_step=step_counter):
       # Record the operations used to compute the loss given the input,
       # so that the gradient of the loss with respect to the variables
@@ -69,8 +70,9 @@ def train(model, optimizer, dataset, step_counter, log_interval=None):
     with tf.GradientTape() as tape:
       logits = model(images, training=True)
       loss_value = loss(logits, labels)
-      tf.contrib.summary.scalar('loss', loss_value)
-      tf.contrib.summary.scalar('accuracy', compute_accuracy(logits, labels))
+      contrib_summary.scalar('loss', loss_value)
+      contrib_summary.scalar('accuracy',
+                             compute_accuracy(logits, labels))
     grads = tape.gradient(loss_value, model.variables)
     optimizer.apply_gradients(
         zip(grads, model.variables), global_step=step_counter)
@@ -82,6 +84,7 @@ def train(model, optimizer, dataset, step_counter, log_interval=None):
 def test(model, dataset):
   """Perform an evaluation of `model` on the examples from `dataset`."""
+  from tensorflow.contrib import summary as contrib_summary  # pylint: disable=g-import-not-at-top
   avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
   accuracy = tf.keras.metrics.Accuracy('accuracy', dtype=tf.float32)
@@ -93,9 +96,9 @@ def test(model, dataset):
             tf.cast(labels, tf.int64))
   print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
         (avg_loss.result(), 100 * accuracy.result()))
-  with tf.contrib.summary.always_record_summaries():
-    tf.contrib.summary.scalar('loss', avg_loss.result())
-    tf.contrib.summary.scalar('accuracy', accuracy.result())
+  with contrib_summary.always_record_summaries():
+    contrib_summary.scalar('loss', avg_loss.result())
+    contrib_summary.scalar('accuracy', accuracy.result())

 def run_mnist_eager(flags_obj):
@@ -137,9 +140,9 @@ def run_mnist_eager(flags_obj):
   else:
     train_dir = None
     test_dir = None
-  summary_writer = tf.contrib.summary.create_file_writer(
+  summary_writer = tf.compat.v2.summary.create_file_writer(
       train_dir, flush_millis=10000)
-  test_summary_writer = tf.contrib.summary.create_file_writer(
+  test_summary_writer = tf.compat.v2.summary.create_file_writer(
       test_dir, flush_millis=10000, name='test')
   # Create and restore checkpoint (if one exists on the path)
...
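The writer construction above moves to tf.compat.v2.summary.create_file_writer, which accepts the same flush_millis and name arguments. A minimal sketch of the replacement API, assuming TF 1.15+ with eager execution enabled (the log directory and values are illustrative):

    import tensorflow as tf

    writer = tf.compat.v2.summary.create_file_writer('/tmp/mnist_demo', flush_millis=10000)
    with writer.as_default():
      # Scalars written through the v2 API need an explicit step.
      tf.compat.v2.summary.scalar('loss', 0.25, step=1)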
@@ -98,7 +98,7 @@ def model_fn(features, labels, mode, params):
         'class_ids': tf.argmax(logits, axis=1),
         'probabilities': tf.nn.softmax(logits),
     }
-    return tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions)
+    return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(mode, predictions=predictions)

   logits = model(image, training=(mode == tf.estimator.ModeKeys.TRAIN))
   loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
@@ -111,14 +111,14 @@ def model_fn(features, labels, mode, params):
         decay_rate=0.96)
     optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
     if FLAGS.use_tpu:
-      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
-    return tf.contrib.tpu.TPUEstimatorSpec(
+      optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
+    return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
         mode=mode,
         loss=loss,
         train_op=optimizer.minimize(loss, tf.train.get_global_step()))

   if mode == tf.estimator.ModeKeys.EVAL:
-    return tf.contrib.tpu.TPUEstimatorSpec(
+    return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
         mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))
@@ -128,7 +128,7 @@ def train_input_fn(params):
   data_dir = params["data_dir"]
   # Retrieves the batch size for the current shard. The # of shards is
   # computed according to the input pipeline deployment. See
-  # `tf.contrib.tpu.RunConfig` for details.
+  # `tf.compat.v1.estimator.tpu.RunConfig` for details.
   ds = dataset.train(data_dir).cache().repeat().shuffle(
       buffer_size=50000).batch(batch_size, drop_remainder=True)
   return ds
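The comment in the hunk above refers to the per-shard batch size that TPUEstimator injects into the input function through params; the input_fn reads it instead of a flag. A hedged sketch of that convention (the dataset module is the one imported by the original file, and the surrounding lines are illustrative):

    def train_input_fn(params):
      # TPUEstimator fills in params['batch_size'] with the per-shard batch
      # size derived from train_batch_size and the number of shards.
      batch_size = params['batch_size']
      data_dir = params['data_dir']
      ds = dataset.train(data_dir).cache().repeat().shuffle(
          buffer_size=50000).batch(batch_size, drop_remainder=True)
      return ds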
@@ -153,21 +153,22 @@ def main(argv):
   del argv  # Unused.
   tf.logging.set_verbosity(tf.logging.INFO)

-  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+  tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
       FLAGS.tpu,
       zone=FLAGS.tpu_zone,
       project=FLAGS.gcp_project
   )

-  run_config = tf.contrib.tpu.RunConfig(
+  run_config = tf.compat.v1.estimator.tpu.RunConfig(
       cluster=tpu_cluster_resolver,
       model_dir=FLAGS.model_dir,
       session_config=tf.ConfigProto(
           allow_soft_placement=True, log_device_placement=True),
-      tpu_config=tf.contrib.tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
+      tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(
+          FLAGS.iterations, FLAGS.num_shards),
   )

-  estimator = tf.contrib.tpu.TPUEstimator(
+  estimator = tf.compat.v1.estimator.tpu.TPUEstimator(
       model_fn=model_fn,
       use_tpu=FLAGS.use_tpu,
       train_batch_size=FLAGS.batch_size,
...
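The TPU estimator classes keep their constructor arguments across this rename; only the import path changes from tf.contrib.tpu to tf.compat.v1.estimator.tpu (and the cluster resolver to tf.distribute.cluster_resolver). A minimal sketch assuming TF 1.15+; the model directory, iteration count, and shard count are illustrative:

    import tensorflow as tf

    # tf.contrib.tpu.RunConfig / TPUConfig map directly onto the compat paths:
    run_config = tf.compat.v1.estimator.tpu.RunConfig(
        model_dir='/tmp/tpu_demo',
        tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(
            iterations_per_loop=100, num_shards=8))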
@@ -445,7 +445,8 @@ def resnet_model_fn(features, labels, mode, model_class,
     tf.compat.v1.summary.scalar('learning_rate', learning_rate)

     if flags.FLAGS.enable_lars:
-      optimizer = tf.contrib.opt.LARSOptimizer(
+      from tensorflow.contrib import opt as contrib_opt  # pylint: disable=g-import-not-at-top
+      optimizer = contrib_opt.LARSOptimizer(
           learning_rate,
           momentum=momentum,
           weight_decay=weight_decay,
...
@@ -177,14 +177,15 @@ def get_train_op_and_metrics(loss, params):
     # Create optimizer. Use LazyAdamOptimizer from TF contrib, which is faster
     # than the TF core Adam optimizer.
-    optimizer = tf.contrib.opt.LazyAdamOptimizer(
+    from tensorflow.contrib import opt as contrib_opt  # pylint: disable=g-import-not-at-top
+    optimizer = contrib_opt.LazyAdamOptimizer(
         learning_rate,
         beta1=params["optimizer_adam_beta1"],
         beta2=params["optimizer_adam_beta2"],
         epsilon=params["optimizer_adam_epsilon"])

     if params["use_tpu"] and params["tpu"] != tpu_util.LOCAL:
-      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
+      optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)

   # Uses automatic mixed precision FP16 training if on GPU.
   if params["dtype"] == "fp16":
@@ -533,7 +534,7 @@ def construct_estimator(flags_obj, params, schedule_manager):
         model_fn=model_fn, model_dir=flags_obj.model_dir, params=params,
         config=tf.estimator.RunConfig(train_distribute=distribution_strategy))

-  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+  tpu_cluster_resolver = tf.compat.v1.cluster_resolver.TPUClusterResolver(
       tpu=flags_obj.tpu,
       zone=flags_obj.tpu_zone,
       project=flags_obj.tpu_gcp_project
...
@@ -58,13 +58,13 @@ def construct_scalar_host_call(metric_dict, model_dir, prefix=""):
       List of summary ops to run on the CPU host.
     """
     step = global_step[0]
-    with tf.contrib.summary.create_file_writer(
+    with tf.compat.v1.summary.create_file_writer(
         logdir=model_dir, filename_suffix=".host_call").as_default():
-      with tf.contrib.summary.always_record_summaries():
+      with tf.compat.v1.summary.always_record_summaries():
         for i, name in enumerate(metric_names):
-          tf.contrib.summary.scalar(prefix + name, args[i][0], step=step)
-      return tf.contrib.summary.all_summary_ops()
+          tf.compat.v1.summary.scalar(prefix + name, args[i][0], step=step)
+      return tf.compat.v1.summary.all_summary_ops()

   # To log the current learning rate, and gradient norm for Tensorboard, the
   # summary op needs to be run on the host CPU via host_call. host_call
...