Unverified Commit dcdc45bd authored by Haoyu Zhang, committed by GitHub

Do not use XLA in warmup tests (#6951)

Because we run warmup tests in all real data benchmarks, XLA bugs will cause non-XLA tests to fail as well.
parent e7b21bfd
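The diff below drops `enable_xla` from the shared default flags and instead sets `FLAGS.enable_xla = True` inside each real-data benchmark method, so the warmup tests, which only consume the shared defaults, never compile with XLA. As a rough illustration of that pattern, here is a minimal, self-contained sketch; the flag registration and the `KerasBenchmarkStub` base class are illustrative stand-ins for the real `keras_benchmark.KerasBenchmark` machinery, not its actual API.

```python
from absl import flags

# Hypothetical flag registration; the real benchmark flags are defined in the
# official/ model code, not here.
flags.DEFINE_integer('num_gpus', 0, 'Number of GPUs to use.')
flags.DEFINE_boolean('enable_eager', False, 'Run in eager mode.')
flags.DEFINE_boolean('enable_xla', False, 'Compile the model with XLA.')
flags.DEFINE_integer('batch_size', 32, 'Global batch size.')
flags.DEFINE_boolean('skip_eval', False, 'Skip evaluation.')
flags.DEFINE_string('dtype', 'fp32', 'Compute dtype.')

FLAGS = flags.FLAGS


class KerasBenchmarkStub:
  """Illustrative stand-in for keras_benchmark.KerasBenchmark."""

  def __init__(self, default_flags=None):
    # Warmup tests and benchmark methods both start from these defaults.
    self.default_flags = default_flags or {}

  def _setup(self):
    # Reset FLAGS to the shared defaults before each benchmark run.
    for name, value in self.default_flags.items():
      setattr(FLAGS, name, value)

  def _run_and_report_benchmark(self):
    print('running benchmark with enable_xla=%s' % FLAGS.enable_xla)


class TrivialKerasBenchmarkReal(KerasBenchmarkStub):

  def __init__(self):
    def_flags = {
        'skip_eval': True,
        'dtype': 'fp16',
        # Note: no 'enable_xla' entry any more. Warmup tests only see these
        # shared defaults, so an XLA bug can no longer break non-XLA tests.
    }
    super().__init__(default_flags=def_flags)

  def benchmark_1_gpu(self):
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True  # XLA is opted into per benchmark method instead.
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()


if __name__ == '__main__':
  FLAGS(['benchmark_sketch'])  # Parse flags so they can be read and set.
  TrivialKerasBenchmarkReal().benchmark_1_gpu()
```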
@@ -1007,7 +1007,6 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     def_flags['skip_eval'] = True
     def_flags['report_accuracy_metrics'] = False
     def_flags['dtype'] = 'fp16'
-    def_flags['enable_xla'] = True
     def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
     def_flags['train_steps'] = 600
     def_flags['log_steps'] = 100
@@ -1046,6 +1045,7 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     FLAGS.num_gpus = 1
     FLAGS.enable_eager = True
+    FLAGS.enable_xla = True
     FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
     FLAGS.batch_size = 256
     self._run_and_report_benchmark()
@@ -1056,6 +1056,7 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     FLAGS.num_gpus = 1
     FLAGS.enable_eager = False
+    FLAGS.enable_xla = True
     FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
     FLAGS.batch_size = 256
     self._run_and_report_benchmark()
@@ -1066,6 +1067,7 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     FLAGS.num_gpus = 8
     FLAGS.enable_eager = True
+    FLAGS.enable_xla = True
     FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
     FLAGS.batch_size = 256 * 8
     self._run_and_report_benchmark()
@@ -1078,6 +1080,7 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     FLAGS.num_gpus = 8
     FLAGS.enable_eager = True
+    FLAGS.enable_xla = True
     FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
     FLAGS.batch_size = 256 * 8
     FLAGS.tf_gpu_thread_mode = 'gpu_private'
@@ -1092,6 +1095,7 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     FLAGS.num_gpus = 8
     FLAGS.enable_eager = True
+    FLAGS.enable_xla = True
     FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
     FLAGS.batch_size = 256 * 8
     FLAGS.tf_data_experimental_slack = True
@@ -1105,6 +1109,7 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     FLAGS.num_gpus = 8
     FLAGS.enable_eager = False
+    FLAGS.enable_xla = True
     FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
     FLAGS.batch_size = 256 * 8
     self._run_and_report_benchmark()
@@ -1117,6 +1122,7 @@ class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
     FLAGS.num_gpus = 8
     FLAGS.enable_eager = False
+    FLAGS.enable_xla = True
     FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
     FLAGS.batch_size = 256 * 8
     FLAGS.tf_gpu_thread_mode = 'gpu_private'