ModelZoo / ResNet50_tensorflow
Unverified commit 6545cb3c, authored Aug 05, 2019 by Toby Boyd; committed by GitHub on Aug 05, 2019.
Remove layout_off tests and related utils. (#7359)
Parent: ca7d215d
Showing 5 changed files with 7 additions and 160 deletions (+7 -160):
official/resnet/keras/keras_cifar_main.py (+1 -3)
official/resnet/keras/keras_common.py (+0 -9)
official/resnet/keras/keras_imagenet_benchmark.py (+0 -116)
official/resnet/keras/keras_imagenet_main.py (+1 -3)
official/utils/misc/keras_utils.py (+5 -29)
official/resnet/keras/keras_cifar_main.py

@@ -79,9 +79,7 @@ def run(flags_obj):
   """
   keras_utils.set_session_config(enable_eager=flags_obj.enable_eager,
-                                 enable_xla=flags_obj.enable_xla,
-                                 enable_grappler_layout_optimizer=
-                                 flags_obj.enable_grappler_layout_optimizer)
+                                 enable_xla=flags_obj.enable_xla)

   # Execute flag override logic for better model performance
   if flags_obj.tf_gpu_thread_mode:
official/resnet/keras/keras_common.py

@@ -310,15 +310,6 @@ def define_keras_flags(dynamic_loss_scale=True):
   flags.DEFINE_boolean(
       name='enable_get_next_as_optional', default=False,
       help='Enable get_next_as_optional behavior in DistributedIterator.')
-  # TODO(b/76028325): Remove when generic layout optimizer is ready.
-  flags.DEFINE_boolean(
-      name='enable_grappler_layout_optimizer',
-      default=True,
-      help='Enable Grappler layout optimizer. Currently Grappler can '
-           'de-optimize fp16 graphs by forcing NCHW layout for all '
-           'convolutions and batch normalizations, and this flag allows to '
-           'disable it.')
-

 def get_synth_input_fn(height, width, num_channels, num_classes,
                        dtype=tf.float32, drop_remainder=True):
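The flag removed above was an ordinary absl boolean flag. For context, a minimal self-contained sketch of how such a flag is defined and toggled from the command line; the script and the name demo_flag are hypothetical stand-ins, not part of this commit:

# Hypothetical absl.flags sketch; demo_flag stands in for a boolean flag
# like the removed enable_grappler_layout_optimizer. Before this commit,
# the real flag could be disabled with either
#   --enable_grappler_layout_optimizer=false   or
#   --noenable_grappler_layout_optimizer
from absl import app, flags

flags.DEFINE_boolean(
    name='demo_flag', default=True,
    help='Stand-in boolean flag to demonstrate absl command-line parsing.')

def main(_):
  print('demo_flag =', flags.FLAGS.demo_flag)

if __name__ == '__main__':
  app.run(main)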
official/resnet/keras/keras_imagenet_benchmark.py
@@ -344,17 +344,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.batch_size = 128
     self._run_and_report_benchmark()

-  def benchmark_1_gpu_layout_off(self):
-    """Test Keras model with 1 GPU and no layout optimizer."""
-    self._setup()
-
-    FLAGS.num_gpus = 1
-    FLAGS.enable_eager = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_layout_off')
-    FLAGS.batch_size = 128
-    FLAGS.enable_grappler_layout_optimizer = False
-    self._run_and_report_benchmark()
-
   def benchmark_xla_1_gpu(self):
     """Test Keras model with XLA and 1 GPU."""
@@ -368,18 +357,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.batch_size = 128
     self._run_and_report_benchmark()

-  def benchmark_xla_1_gpu_layout_off(self):
-    """Test Keras model with 1 GPU and xla w/no layout optimizer."""
-    self._setup()
-
-    FLAGS.num_gpus = 1
-    FLAGS.enable_eager = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_layout_off')
-    FLAGS.batch_size = 128
-    FLAGS.enable_grappler_layout_optimizer = False
-    self._run_and_report_benchmark()
-
   def benchmark_1_gpu_fp16(self):
     """Test Keras model with 1 GPU and fp16."""
     self._setup()
@@ -392,20 +369,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.batch_size = 256
     self._run_and_report_benchmark()

-  def benchmark_1_gpu_fp16_layout_off(self):
-    """Test Keras model with 1 GPU and FP16 w/no layout optimizer."""
-    self._setup()
-
-    FLAGS.num_gpus = 1
-    FLAGS.enable_eager = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_layout_off')
-    FLAGS.dtype = 'fp16'
-    FLAGS.batch_size = 256
-    FLAGS.enable_grappler_layout_optimizer = False
-    FLAGS.data_format = 'channels_last'
-    self._run_and_report_benchmark()
-
   def benchmark_1_gpu_fp16_dynamic(self):
     """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
     self._setup()
@@ -432,21 +395,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.batch_size = 256
     self._run_and_report_benchmark()

-  def benchmark_xla_1_gpu_fp16_layout_off(self):
-    """Test Keras model with FP16+XLA w/no layout optimizer."""
-    self._setup()
-
-    FLAGS.num_gpus = 1
-    FLAGS.enable_eager = True
-    FLAGS.enable_xla = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_layout_off')
-    FLAGS.dtype = 'fp16'
-    FLAGS.batch_size = 256
-    FLAGS.enable_grappler_layout_optimizer = False
-    FLAGS.data_format = 'channels_last'
-    self._run_and_report_benchmark()
-
   def benchmark_xla_1_gpu_fp16_tweaked(self):
     """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
     self._setup()
@@ -656,20 +604,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.batch_size = 256 * 8  # 8 GPUs
     self._run_and_report_benchmark()

-  def benchmark_8_gpu_fp16_layout_off(self):
-    """Test Keras model with 8 GPUs, fp16, and layout off."""
-    self._setup()
-
-    FLAGS.num_gpus = 8
-    FLAGS.dtype = 'fp16'
-    FLAGS.enable_eager = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_layout_off')
-    FLAGS.batch_size = 256 * 8  # 8 GPUs
-    FLAGS.enable_grappler_layout_optimizer = False
-    FLAGS.data_format = 'channels_last'
-    self._run_and_report_benchmark()
-
   def benchmark_8_gpu_fp16_tweaked(self):
     """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
     self._setup()
@@ -684,24 +618,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.tf_gpu_thread_mode = 'gpu_private'
     self._run_and_report_benchmark()

-  def benchmark_8_gpu_fp16_tweaked_layout_off(self):
-    """Test Keras model with 8 GPUs, fp16, tuning, and layout off."""
-    self._setup()
-
-    FLAGS.num_gpus = 8
-    FLAGS.dtype = 'fp16'
-    FLAGS.enable_eager = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked_layout_off')
-    FLAGS.batch_size = 256 * 8  # 8 GPUs
-    FLAGS.use_tensor_lr = True
-    FLAGS.tf_gpu_thread_mode = 'gpu_private'
-    FLAGS.data_delay_prefetch = True
-    FLAGS.enable_grappler_layout_optimizer = False
-    FLAGS.data_format = 'channels_last'
-    self._run_and_report_benchmark()
-
   def benchmark_8_gpu_fp16_dynamic_tweaked(self):
     """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
     self._setup()
@@ -749,21 +665,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.batch_size = 256 * 8  # 8 GPUs
     self._run_and_report_benchmark()

-  def benchmark_xla_8_gpu_fp16_layout_off(self):
-    """Test Keras model with XLA, 8 GPUs, fp16, and layout off."""
-    self._setup()
-
-    FLAGS.num_gpus = 8
-    FLAGS.dtype = 'fp16'
-    FLAGS.enable_eager = True
-    FLAGS.enable_xla = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_layout_off')
-    FLAGS.batch_size = 256 * 8  # 8 GPUs
-    FLAGS.enable_grappler_layout_optimizer = False
-    FLAGS.data_format = 'channels_last'
-    self._run_and_report_benchmark()
-
   def benchmark_xla_8_gpu_fp16_tweaked(self):
     """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
     self._setup()
@@ -780,23 +681,6 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
     FLAGS.datasets_num_private_threads = 48
     self._run_and_report_benchmark()

-  def benchmark_xla_8_gpu_fp16_tweaked_layout_off(self):
-    """Test with tuning, FP16+XLA, and layout_off."""
-    self._setup()
-
-    FLAGS.num_gpus = 8
-    FLAGS.dtype = 'fp16'
-    FLAGS.enable_eager = True
-    FLAGS.enable_xla = True
-    FLAGS.distribution_strategy = 'default'
-    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked_layout_off')
-    FLAGS.batch_size = 256 * 8
-    FLAGS.use_tensor_lr = True
-    FLAGS.enable_grappler_layout_optimizer = False
-    FLAGS.data_format = 'channels_last'
-    self._run_and_report_benchmark()
-
   def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
     """Test with manual config tuning, XLA, 8 GPUs and fp16.
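Every _layout_off method deleted above follows the same shape as the surviving benchmarks. A minimal illustrative sketch of that pattern; FLAGS, _setup, _get_model_dir, and _run_and_report_benchmark come from the surrounding Resnet50KerasBenchmarkBase class, and the specific flag values here are illustrative, not from this commit:

  # Illustrative only: the common shape of a benchmark method in this file.
  # FLAGS, self._setup(), self._get_model_dir() and
  # self._run_and_report_benchmark() are provided by the surrounding class.
  def benchmark_example_1_gpu_fp16(self):
    """Illustrative 1-GPU fp16 benchmark in the style of this file."""
    self._setup()                      # reset flags to their defaults

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_example_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()   # train and report metrics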
official/resnet/keras/keras_imagenet_main.py

@@ -85,9 +85,7 @@ def run(flags_obj):
   """
   keras_utils.set_session_config(enable_eager=flags_obj.enable_eager,
-                                 enable_xla=flags_obj.enable_xla,
-                                 enable_grappler_layout_optimizer=
-                                 flags_obj.enable_grappler_layout_optimizer)
+                                 enable_xla=flags_obj.enable_xla)

   # Execute flag override logic for better model performance
   if flags_obj.tf_gpu_thread_mode:
official/utils/misc/keras_utils.py
@@ -142,17 +142,12 @@ class ProfilerCallback(tf.keras.callbacks.Callback):

 def set_session_config(enable_eager=False,
-                       enable_xla=False,
-                       enable_grappler_layout_optimizer=True):
+                       enable_xla=False):
   """Sets the session config."""
   if is_v2_0():
-    set_config_v2(enable_xla=enable_xla,
-                  enable_grappler_layout_optimizer=
-                  enable_grappler_layout_optimizer)
+    set_config_v2(enable_xla=enable_xla)
   else:
-    config = get_config_proto_v1(enable_xla=enable_xla,
-                                 enable_grappler_layout_optimizer=
-                                 enable_grappler_layout_optimizer)
+    config = get_config_proto_v1(enable_xla=enable_xla)
     if enable_eager:
       tf.compat.v1.enable_eager_execution(config=config)
     else:
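After this hunk, set_session_config takes only two keyword arguments. A minimal usage sketch; the import path follows this repository's layout, and the argument values are illustrative:

# Hedged usage sketch of the simplified helper; values are illustrative.
from official.utils.misc import keras_utils

# Configures eager execution and, optionally, XLA. The Grappler layout
# optimizer is no longer controllable from this entry point.
keras_utils.set_session_config(enable_eager=True, enable_xla=False)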
@@ -160,8 +155,7 @@ def set_session_config(enable_eager=False,
       tf.keras.backend.set_session(sess)


-def get_config_proto_v1(enable_xla=False,
-                        enable_grappler_layout_optimizer=True):
+def get_config_proto_v1(enable_xla=False):
   """Return config proto according to flag settings, or None to use default."""
   config = None
   if enable_xla:
@@ -172,20 +166,10 @@ def get_config_proto_v1(enable_xla=False,
   # OOM and performance regression.
   config.graph_options.rewrite_options.pin_to_host_optimization = (
       rewriter_config_pb2.RewriterConfig.OFF)

-  # TODO(b/76028325): Remove when generic layout optimizer will be ready.
-  if not enable_grappler_layout_optimizer:
-    if config is None:
-      config = tf.compat.v1.ConfigProto()
-    # Disable LayoutOptimizer in grappler, because it might de-optimize fp16
-    # graphs, and force NCHW data format in all convolutions and batch
-    # normalizations.
-    config.graph_options.rewrite_options.layout_optimizer = (
-        rewriter_config_pb2.RewriterConfig.OFF)
-
   return config


-def set_config_v2(enable_xla=False,
-                  enable_grappler_layout_optimizer=False):
+def set_config_v2(enable_xla=False):
   """Config eager context according to flag values using TF 2.0 API."""
   if enable_xla:
     tf.config.optimizer.set_jit(True)
@@ -194,14 +178,6 @@ def set_config_v2(enable_xla=False,
   tf.config.optimizer.set_experimental_options(
       {'pin_to_host_optimization': False})

-  # TODO(b/76028325): Remove when generic layout optimizer will be ready.
-  if not enable_grappler_layout_optimizer:
-    # Disable LayoutOptimizer in grappler, because it might de-optimize fp16
-    # graphs, and force NCHW data format in all convolutions and batch
-    # normalizations.
-    tf.config.optimizer.set_experimental_options(
-        {'layout_optimizer': False})


 def is_v2_0():
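Taken together, the deleted branches in this file amount to the following standalone snippet, shown here as a hedged sketch assuming the TF 1.14+/2.0 APIs already used in the diff above:

# Hedged sketch of the removed behavior, outside this repo's helpers.
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2

# TF2 / eager path (the removed set_config_v2 branch): switch Grappler's
# layout optimizer off via the experimental-options API.
tf.config.optimizer.set_experimental_options({'layout_optimizer': False})

# TF1 path (the removed get_config_proto_v1 branch): set the same switch
# on a ConfigProto's rewrite options and pass it to the session.
config = tf.compat.v1.ConfigProto()
config.graph_options.rewrite_options.layout_optimizer = (
    rewriter_config_pb2.RewriterConfig.OFF)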