Commit 7546a9e3 authored by Haoyu Zhang's avatar Haoyu Zhang Committed by Toby Boyd
Browse files

Fix internal lint errors (#6937)

parent ba415414
...@@ -335,6 +335,7 @@ def define_ncf_flags(): ...@@ -335,6 +335,7 @@ def define_ncf_flags():
help=flags_core.help_wrap( help=flags_core.help_wrap(
"If True, we use a custom training loop for keras.")) "If True, we use a custom training loop for keras."))
def convert_to_softmax_logits(logits): def convert_to_softmax_logits(logits):
'''Convert the logits returned by the base model to softmax logits. '''Convert the logits returned by the base model to softmax logits.
......
...@@ -353,7 +353,7 @@ def run_ncf(_): ...@@ -353,7 +353,7 @@ def run_ncf(_):
train_loss += train_step() train_loss += train_step()
time_callback.on_batch_end(step+epoch*num_train_steps) time_callback.on_batch_end(step+epoch*num_train_steps)
logging.info("Done training epoch %s, epoch loss=%s.", logging.info("Done training epoch %s, epoch loss=%s.",
epoch+1, train_loss/num_train_steps) epoch+1, train_loss/num_train_steps)
eval_input_iterator.initialize() eval_input_iterator.initialize()
hr_sum = 0 hr_sum = 0
hr_count = 0 hr_count = 0
......
...@@ -603,9 +603,7 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark): ...@@ -603,9 +603,7 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark() self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_cloning_tweaked(self): def benchmark_xla_8_gpu_fp16_cloning_tweaked(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs, fp16, and """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning."""
cloning.
"""
self._setup() self._setup()
FLAGS.num_gpus = 8 FLAGS.num_gpus = 8
...@@ -623,8 +621,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark): ...@@ -623,8 +621,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark() self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self): def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs and fp16. Delay """Test with manual config tuning, XLA, 8 GPUs and fp16.
performance measurement for stable performance on 96 vCPU platforms.
Delay performance measurement for stable performance on 96 vCPU platforms.
""" """
self._setup() self._setup()
...@@ -643,9 +642,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark): ...@@ -643,9 +642,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark() self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure(self): def benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs, fp16, and """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning.
cloning. Delay performance measurement for stable performance on 96 vCPU
platforms. Delay performance measurement for stable performance on 96 vCPU platforms.
""" """
self._setup() self._setup()
...@@ -821,9 +820,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark): ...@@ -821,9 +820,9 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark() self._run_and_report_benchmark()
def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self): def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Test Keras model in legacy graph mode with manual config tuning, XLA, """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.
8 GPUs and fp16. Delay performance measurement for stable performance
on 96 vCPU platforms. Delay performance measurement for stable performance on 96 vCPU platforms.
""" """
self._setup() self._setup()
...@@ -841,8 +840,7 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark): ...@@ -841,8 +840,7 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
self._run_and_report_benchmark() self._run_and_report_benchmark()
def benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next(self): def benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next(self):
"""Test Keras model in legacy graph mode with manual config tuning, XLA, """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.
8 GPUs and fp16.
This test also enables get_next_as_optional. This test also enables get_next_as_optional.
""" """
......
...@@ -251,7 +251,8 @@ def _read_and_batch_from_files( ...@@ -251,7 +251,8 @@ def _read_and_batch_from_files(
([max_length], [max_length]), drop_remainder=True) ([max_length], [max_length]), drop_remainder=True)
else: else:
# Group and batch such that each batch has examples of similar length. # Group and batch such that each batch has examples of similar length.
# TODO: _batch_examples might need to do something special for num_replicas. # TODO(xunkai): _batch_examples might need to do something special for
# num_replicas.
dataset = _batch_examples(dataset, batch_size, max_length) dataset = _batch_examples(dataset, batch_size, max_length)
dataset = dataset.repeat(repeat) dataset = dataset.repeat(repeat)
......
...@@ -25,7 +25,7 @@ from __future__ import print_function ...@@ -25,7 +25,7 @@ from __future__ import print_function
import os import os
import tempfile import tempfile
from absl import app as absl_app from absl import app as absl_app # pylint: disable=unused-import
from absl import flags from absl import flags
import tensorflow as tf import tensorflow as tf
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment