Commit 7546a9e3 authored by Haoyu Zhang, committed by Toby Boyd

Fix internal lint errors (#6937)

parent ba415414
......@@ -335,6 +335,7 @@ def define_ncf_flags():
help=flags_core.help_wrap(
"If True, we use a custom training loop for keras."))
def convert_to_softmax_logits(logits):
'''Convert the logits returned by the base model to softmax logits.
......
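The body of convert_to_softmax_logits is collapsed in this view. For context, a minimal sketch of the usual trick its docstring refers to, concatenating a column of zeros so that a two-way softmax reproduces a sigmoid (illustrative only, not copied from the file):

import tensorflow as tf

def convert_to_softmax_logits(logits):
  # Prepend a zero column: softmax([0, x]) equals [1 - sigmoid(x), sigmoid(x)],
  # so sigmoid-style scores can be fed to a softmax cross-entropy loss.
  return tf.concat([logits * 0, logits], axis=-1)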
......@@ -353,7 +353,7 @@ def run_ncf(_):
train_loss += train_step()
time_callback.on_batch_end(step+epoch*num_train_steps)
logging.info("Done training epoch %s, epoch loss=%s.",
-        epoch+1, train_loss/num_train_steps)
+                 epoch+1, train_loss/num_train_steps)
eval_input_iterator.initialize()
hr_sum = 0
hr_count = 0
......
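The fragment above comes from NCF's custom training loop. Below is a self-contained sketch of how the shown pieces fit together; train_step, eval_step, and the step counts are stand-ins, not the real run_ncf code:

from absl import logging

num_train_steps, num_eval_steps, train_epochs = 100, 10, 2  # assumed sizes

def train_step():
  return 0.5          # stand-in: would return one step's training loss

def eval_step():
  return 8.0, 10.0    # stand-in: would return (hits, examples) for hit rate

for epoch in range(train_epochs):
  train_loss = 0.0
  for step in range(num_train_steps):
    train_loss += train_step()
  logging.info("Done training epoch %s, epoch loss=%s.",
               epoch + 1, train_loss / num_train_steps)

  hr_sum, hr_count = 0.0, 0.0
  for _ in range(num_eval_steps):
    hits, examples = eval_step()
    hr_sum += hits
    hr_count += examples
  logging.info("Done eval epoch %s, HR=%s.", epoch + 1, hr_sum / hr_count)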
......@@ -603,9 +603,7 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_cloning_tweaked(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs, fp16, and
cloning.
"""
"""Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning."""
self._setup()
FLAGS.num_gpus = 8
......@@ -623,8 +621,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs and fp16. Delay
performance measurement for stable performance on 96 vCPU platforms.
"""Test with manual config tuning, XLA, 8 GPUs and fp16.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()
......@@ -643,9 +642,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs, fp16, and
cloning. Delay performance measurement for stable performance on 96 vCPU
platforms.
"""Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()
......@@ -821,9 +820,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark()
def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Test Keras model in legacy graph mode with manual config tuning, XLA,
8 GPUs and fp16. Delay performance measurement for stable performance
on 96 vCPU platforms.
"""Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()
......@@ -841,8 +840,7 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark()
def benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next(self):
"""Test Keras model in legacy graph mode with manual config tuning, XLA,
8 GPUs and fp16.
"""Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.
This test also enables get_next_as_optional.
"""
......
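The hunks above only shorten and reflow test docstrings. For readers unfamiliar with these benchmark classes, here is a rough, self-contained sketch of the shared pattern (reset flags, override a few, run and report); the flag names and helpers are stand-ins, not the actual Resnet50KerasBenchmarkBase code:

import types

FLAGS = types.SimpleNamespace()  # stand-in for absl FLAGS

class BenchmarkPatternSketch:
  """Illustrative only; mirrors the shape of the methods edited above."""

  def _setup(self):
    # The real _setup restores every flag to its default between benchmarks.
    FLAGS.num_gpus = 1
    FLAGS.dtype = "fp32"
    FLAGS.enable_xla = False

  def _run_and_report_benchmark(self):
    print("would train ResNet50 with", vars(FLAGS))

  def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs and fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.dtype = "fp16"
    FLAGS.enable_xla = True
    self._run_and_report_benchmark()

BenchmarkPatternSketch().benchmark_xla_8_gpu_fp16_tweaked_delay_measure()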
......@@ -251,7 +251,8 @@ def _read_and_batch_from_files(
([max_length], [max_length]), drop_remainder=True)
else:
# Group and batch such that each batch has examples of similar length.
-# TODO: _batch_examples might need to do something special for num_replicas.
+# TODO(xunkai): _batch_examples might need to do something special for
+# num_replicas.
dataset = _batch_examples(dataset, batch_size, max_length)
dataset = dataset.repeat(repeat)
......
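The TODO above concerns _batch_examples, which groups examples of similar length before batching so per-batch padding stays small. A conceptual sketch of that idea using the stock tf.data bucketing transform (the real _batch_examples implements its own bucketing scheme; boundaries and batch sizes below are illustrative):

import tensorflow as tf

def batch_by_similar_length(dataset, batch_size, max_length):
  # Bucket (source, target) pairs by source length so that each batch only
  # pads to the length of its bucket.
  return dataset.apply(
      tf.data.experimental.bucket_by_sequence_length(
          element_length_func=lambda src, tgt: tf.shape(src)[0],
          bucket_boundaries=[16, 32, 64, 128, max_length],
          bucket_batch_sizes=[batch_size] * 6))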
......@@ -25,7 +25,7 @@ from __future__ import print_function
import os
import tempfile
-from absl import app as absl_app
+from absl import app as absl_app  # pylint: disable=unused-import
from absl import flags
import tensorflow as tf
......