Commit fc4ce16b authored by A. Unique TensorFlower

Switch away from contrib where possible in tensorflow_models/official

PiperOrigin-RevId: 288064272
parent 39f4e422
@@ -400,7 +400,7 @@ def _single_token_mask(inputs, tgt_len, num_predict):
   non_func_indices = tf.boolean_mask(all_indices, non_func_mask)
   masked_pos = tf.random.shuffle(non_func_indices)
-  masked_pos = tf.contrib.framework.sort(masked_pos[:num_predict])
+  masked_pos = tf.sort(masked_pos[:num_predict])
   target_mask = tf.sparse_to_dense(
       sparse_indices=masked_pos,
       output_shape=[tgt_len],
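`tf.sort` is the in-core equivalent of `tf.contrib.framework.sort`, so this hunk is a pure rename. A minimal sketch of the masking pattern above, with illustrative values; `tf.scatter_nd` is shown only as a non-deprecated stand-in for `tf.sparse_to_dense`, which is not something this commit changes:

import tensorflow as tf

# Illustrative shapes mirroring the arguments above.
tgt_len, num_predict = 8, 3
non_func_indices = tf.constant([0, 2, 3, 5, 6, 7], dtype=tf.int64)

masked_pos = tf.random.shuffle(non_func_indices)
masked_pos = tf.sort(masked_pos[:num_predict])  # core op, no contrib import needed

# Build the dense 0/1 target mask; tf.scatter_nd avoids the deprecated
# tf.sparse_to_dense (an aside, not part of this diff).
target_mask = tf.scatter_nd(
    indices=masked_pos[:, None],
    updates=tf.ones([num_predict], dtype=tf.float32),
    shape=[tgt_len])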
@@ -229,7 +229,8 @@ def train_boosted_trees(flags_obj):
   # Though BoostedTreesClassifier is under tf.estimator, faster in-memory
   # training is yet provided as a contrib library.
-  classifier = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(
+  from tensorflow.contrib import estimator as contrib_estimator  # pylint: disable=g-import-not-at-top
+  classifier = contrib_estimator.boosted_trees_classifier_train_in_memory(
       train_input_fn,
       feature_columns,
       model_dir=flags_obj.model_dir or None,
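As the comment above notes, only the in-memory trainer lives in contrib; the streaming `tf.estimator.BoostedTreesClassifier` is the core alternative. A hedged sketch of that core path, not what this commit uses; the flag names and hyperparameter values are assumed from the surrounding script:

import tensorflow as tf

# Core-API alternative to the contrib in-memory trainer: slower, since it
# streams batches instead of holding the dataset in memory.
# `feature_columns`, `train_input_fn`, and `flags_obj` are assumed to be
# defined as in the surrounding script.
classifier = tf.estimator.BoostedTreesClassifier(
    feature_columns=feature_columns,
    n_batches_per_layer=100,  # illustrative value
    model_dir=flags_obj.model_dir or None,
    n_trees=flags_obj.n_trees,
    max_depth=flags_obj.max_depth,
    learning_rate=flags_obj.learning_rate)
classifier.train(train_input_fn, max_steps=100)  # illustrative step budget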
@@ -58,10 +58,11 @@ def compute_accuracy(logits, labels):
 def train(model, optimizer, dataset, step_counter, log_interval=None):
   """Trains model on `dataset` using `optimizer`."""
+  from tensorflow.contrib import summary as contrib_summary  # pylint: disable=g-import-not-at-top
   start = time.time()
   for (batch, (images, labels)) in enumerate(dataset):
-    with tf.contrib.summary.record_summaries_every_n_global_steps(
+    with contrib_summary.record_summaries_every_n_global_steps(
         10, global_step=step_counter):
       # Record the operations used to compute the loss given the input,
       # so that the gradient of the loss with respect to the variables
@@ -69,8 +70,9 @@ def train(model, optimizer, dataset, step_counter, log_interval=None):
       with tf.GradientTape() as tape:
         logits = model(images, training=True)
         loss_value = loss(logits, labels)
-        tf.contrib.summary.scalar('loss', loss_value)
-        tf.contrib.summary.scalar('accuracy', compute_accuracy(logits, labels))
+        contrib_summary.scalar('loss', loss_value)
+        contrib_summary.scalar('accuracy',
+                               compute_accuracy(logits, labels))
       grads = tape.gradient(loss_value, model.variables)
       optimizer.apply_gradients(
           zip(grads, model.variables), global_step=step_counter)
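For comparison only (not part of this change), the TF2-native `tf.summary` API expresses the same every-N-steps throttling with `tf.summary.record_if`; a sketch assuming `step_counter`, `loss_value`, `logits`, and `labels` from the surrounding function, plus an already-installed default writer:

import tensorflow as tf

# Record summaries only every 10 steps, mirroring
# contrib_summary.record_summaries_every_n_global_steps(10, global_step=step_counter).
with tf.summary.record_if(lambda: tf.equal(step_counter % 10, 0)):
  tf.summary.scalar('loss', loss_value, step=step_counter)
  tf.summary.scalar('accuracy', compute_accuracy(logits, labels), step=step_counter)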
@@ -82,6 +84,7 @@ def train(model, optimizer, dataset, step_counter, log_interval=None):
 def test(model, dataset):
   """Perform an evaluation of `model` on the examples from `dataset`."""
+  from tensorflow.contrib import summary as contrib_summary  # pylint: disable=g-import-not-at-top
   avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
   accuracy = tf.keras.metrics.Accuracy('accuracy', dtype=tf.float32)
@@ -93,9 +96,9 @@ def test(model, dataset):
       tf.cast(labels, tf.int64))
   print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
         (avg_loss.result(), 100 * accuracy.result()))
-  with tf.contrib.summary.always_record_summaries():
-    tf.contrib.summary.scalar('loss', avg_loss.result())
-    tf.contrib.summary.scalar('accuracy', accuracy.result())
+  with contrib_summary.always_record_summaries():
+    contrib_summary.scalar('loss', avg_loss.result())
+    contrib_summary.scalar('accuracy', accuracy.result())


 def run_mnist_eager(flags_obj):
@@ -137,9 +140,9 @@ def run_mnist_eager(flags_obj):
   else:
     train_dir = None
     test_dir = None
-  summary_writer = tf.contrib.summary.create_file_writer(
+  summary_writer = tf.compat.v2.summary.create_file_writer(
       train_dir, flush_millis=10000)
-  test_summary_writer = tf.contrib.summary.create_file_writer(
+  test_summary_writer = tf.compat.v2.summary.create_file_writer(
       test_dir, flush_millis=10000, name='test')

   # Create and restore checkpoint (if one exists on the path)
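`tf.compat.v2.summary.create_file_writer` above is the forward-compatible endpoint; under a plain TF2 install the same writers would be created straight from `tf.summary`. A minimal sketch with illustrative directories and values:

import tensorflow as tf

summary_writer = tf.summary.create_file_writer('/tmp/mnist/train', flush_millis=10000)
test_summary_writer = tf.summary.create_file_writer(
    '/tmp/mnist/eval', flush_millis=10000, name='test')

with summary_writer.as_default():
  tf.summary.scalar('loss', 0.25, step=1)  # illustrative values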
@@ -98,7 +98,7 @@ def model_fn(features, labels, mode, params):
         'class_ids': tf.argmax(logits, axis=1),
         'probabilities': tf.nn.softmax(logits),
     }
-    return tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions)
+    return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(mode, predictions=predictions)

   logits = model(image, training=(mode == tf.estimator.ModeKeys.TRAIN))
   loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
@@ -111,14 +111,14 @@ def model_fn(features, labels, mode, params):
         decay_rate=0.96)
     optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
     if FLAGS.use_tpu:
-      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
-    return tf.contrib.tpu.TPUEstimatorSpec(
+      optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
+    return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
         mode=mode,
         loss=loss,
         train_op=optimizer.minimize(loss, tf.train.get_global_step()))

   if mode == tf.estimator.ModeKeys.EVAL:
-    return tf.contrib.tpu.TPUEstimatorSpec(
+    return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
         mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))
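The `eval_metrics` argument of `TPUEstimatorSpec` takes a `(metric_fn, tensors)` pair; the `metric_fn` runs on the host and must return a dict of TF1-style `(value_op, update_op)` metrics. A sketch of what the `metric_fn` referenced above typically looks like (the exact body in the script may differ):

import tensorflow as tf

def metric_fn(labels, logits):
  # Executed on the CPU host with the evaluation shards' tensors.
  accuracy = tf.metrics.accuracy(
      labels=labels, predictions=tf.argmax(logits, axis=1))
  return {'accuracy': accuracy}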
@@ -128,7 +128,7 @@ def train_input_fn(params):
   data_dir = params["data_dir"]
   # Retrieves the batch size for the current shard. The # of shards is
   # computed according to the input pipeline deployment. See
-  # `tf.contrib.tpu.RunConfig` for details.
+  # `tf.compat.v1.estimator.tpu.RunConfig` for details.
   ds = dataset.train(data_dir).cache().repeat().shuffle(
       buffer_size=50000).batch(batch_size, drop_remainder=True)
   return ds
@@ -153,21 +153,22 @@ def main(argv):
   del argv  # Unused.
   tf.logging.set_verbosity(tf.logging.INFO)

-  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+  tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
       FLAGS.tpu,
       zone=FLAGS.tpu_zone,
       project=FLAGS.gcp_project
   )

-  run_config = tf.contrib.tpu.RunConfig(
+  run_config = tf.compat.v1.estimator.tpu.RunConfig(
       cluster=tpu_cluster_resolver,
       model_dir=FLAGS.model_dir,
       session_config=tf.ConfigProto(
           allow_soft_placement=True, log_device_placement=True),
-      tpu_config=tf.contrib.tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
+      tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(
+          FLAGS.iterations, FLAGS.num_shards),
   )

-  estimator = tf.contrib.tpu.TPUEstimator(
+  estimator = tf.compat.v1.estimator.tpu.TPUEstimator(
       model_fn=model_fn,
       use_tpu=FLAGS.use_tpu,
       train_batch_size=FLAGS.batch_size,
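Taken together, the TPU renames in this file follow one consistent mapping from the removed contrib endpoints to their `compat`/`distribute` equivalents, summarized here from the hunks above:

tf.contrib.tpu.TPUEstimator         -> tf.compat.v1.estimator.tpu.TPUEstimator
tf.contrib.tpu.TPUEstimatorSpec     -> tf.compat.v1.estimator.tpu.TPUEstimatorSpec
tf.contrib.tpu.RunConfig            -> tf.compat.v1.estimator.tpu.RunConfig
tf.contrib.tpu.TPUConfig            -> tf.compat.v1.estimator.tpu.TPUConfig
tf.contrib.tpu.CrossShardOptimizer  -> tf.compat.v1.tpu.CrossShardOptimizer
tf.contrib.cluster_resolver.TPUClusterResolver -> tf.distribute.cluster_resolver.TPUClusterResolver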
@@ -445,7 +445,8 @@ def resnet_model_fn(features, labels, mode, model_class,
     tf.compat.v1.summary.scalar('learning_rate', learning_rate)

     if flags.FLAGS.enable_lars:
-      optimizer = tf.contrib.opt.LARSOptimizer(
+      from tensorflow.contrib import opt as contrib_opt  # pylint: disable=g-import-not-at-top
+      optimizer = contrib_opt.LARSOptimizer(
           learning_rate,
           momentum=momentum,
           weight_decay=weight_decay,
@@ -177,14 +177,15 @@ def get_train_op_and_metrics(loss, params):
     # Create optimizer. Use LazyAdamOptimizer from TF contrib, which is faster
     # than the TF core Adam optimizer.
-    optimizer = tf.contrib.opt.LazyAdamOptimizer(
+    from tensorflow.contrib import opt as contrib_opt  # pylint: disable=g-import-not-at-top
+    optimizer = contrib_opt.LazyAdamOptimizer(
         learning_rate,
         beta1=params["optimizer_adam_beta1"],
         beta2=params["optimizer_adam_beta2"],
         epsilon=params["optimizer_adam_epsilon"])

     if params["use_tpu"] and params["tpu"] != tpu_util.LOCAL:
-      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
+      optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)

     # Uses automatic mixed precision FP16 training if on GPU.
     if params["dtype"] == "fp16":
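LazyAdam still has no in-core TensorFlow equivalent; outside of contrib, the maintained option (an aside, not used by this commit) is `tfa.optimizers.LazyAdam` from the TensorFlow Addons package. A sketch assuming `learning_rate` and `params` from the surrounding function:

import tensorflow_addons as tfa

# Keras-style optimizer; note the beta_1/beta_2 keyword names differ from the
# contrib LazyAdamOptimizer's beta1/beta2.
optimizer = tfa.optimizers.LazyAdam(
    learning_rate=learning_rate,
    beta_1=params["optimizer_adam_beta1"],
    beta_2=params["optimizer_adam_beta2"],
    epsilon=params["optimizer_adam_epsilon"])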
@@ -533,7 +534,7 @@ def construct_estimator(flags_obj, params, schedule_manager):
         model_fn=model_fn, model_dir=flags_obj.model_dir, params=params,
         config=tf.estimator.RunConfig(train_distribute=distribution_strategy))

-  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+  tpu_cluster_resolver = tf.compat.v1.cluster_resolver.TPUClusterResolver(
       tpu=flags_obj.tpu,
       zone=flags_obj.tpu_zone,
       project=flags_obj.tpu_gcp_project
@@ -58,13 +58,13 @@ def construct_scalar_host_call(metric_dict, model_dir, prefix=""):
       List of summary ops to run on the CPU host.
     """
     step = global_step[0]
-    with tf.contrib.summary.create_file_writer(
+    with tf.compat.v1.summary.create_file_writer(
         logdir=model_dir, filename_suffix=".host_call").as_default():
-      with tf.contrib.summary.always_record_summaries():
+      with tf.compat.v1.summary.always_record_summaries():
         for i, name in enumerate(metric_names):
-          tf.contrib.summary.scalar(prefix + name, args[i][0], step=step)
+          tf.compat.v1.summary.scalar(prefix + name, args[i][0], step=step)

-        return tf.contrib.summary.all_summary_ops()
+        return tf.compat.v1.summary.all_summary_ops()

   # To log the current learning rate, and gradient norm for Tensorboard, the
   # summary op needs to be run on the host CPU via host_call. host_call
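As the trailing comment explains, these summary ops only run if the `model_fn` hands them to `TPUEstimatorSpec` through `host_call`; the tensors passed along must be rank 1, which is why the helper indexes `global_step[0]` and `args[i][0]` above. A rough sketch of that wiring, with variable names, the writer setup, and surrounding `model_fn` values (`mode`, `loss`, `train_op`, `learning_rate`) all illustrative rather than taken from this diff:

import tensorflow as tf

def host_call_fn(global_step, learning_rate):
  # Runs on the CPU host; arguments arrive as rank-1 tensors, hence the [0] indexing.
  step = global_step[0]
  with tf.compat.v2.summary.create_file_writer('/tmp/model').as_default():
    with tf.compat.v2.summary.record_if(True):
      tf.compat.v2.summary.scalar('learning_rate', learning_rate[0], step=step)
    return tf.compat.v1.summary.all_v2_summary_ops()

# Inside model_fn, after loss/train_op/learning_rate are defined:
host_call = (host_call_fn,
             [tf.reshape(tf.train.get_global_step(), [1]),
              tf.reshape(learning_rate, [1])])
spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
    mode=mode, loss=loss, train_op=train_op, host_call=host_call)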