Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
ef30de93
"...git@developer.sourcefind.cn:OpenDAS/mmdetection3d.git" did not exist on "2936b3b3cbd479faa202ff9c47102fc98cecda07"
Commit
ef30de93
authored
Aug 10, 2019
by
Vinh Nguyen
Browse files
adding perfzero test configs to keras_imagenet_benchmark.py
parent
63d84bff
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
67 additions
and
1 deletion
+67
-1
official/resnet/keras/keras_imagenet_benchmark.py
official/resnet/keras/keras_imagenet_benchmark.py
+67
-1
No files found.
official/resnet/keras/keras_imagenet_benchmark.py
View file @
ef30de93
...
@@ -78,7 +78,24 @@ class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
...
@@ -78,7 +78,24 @@ class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
FLAGS
.
datasets_num_private_threads
=
14
FLAGS
.
datasets_num_private_threads
=
14
FLAGS
.
use_tensor_lr
=
True
FLAGS
.
use_tensor_lr
=
True
self
.
_run_and_report_benchmark
()
self
.
_run_and_report_benchmark
()
def benchmark_8_gpu_amp(self):
    """Accuracy run: eager + dist_strat on 8 GPUs with automatic mixed precision."""
    self._setup()
    # Benchmark configuration, applied in order onto the global FLAGS object.
    # Note: dtype stays 'fp32' — AMP rewrites the graph automatically.
    config = (
        ('num_gpus', 8),
        ('data_dir', self.data_dir),
        ('batch_size', 128 * 8),
        ('train_epochs', 90),
        ('epochs_between_evals', 10),
        ('model_dir', self._get_model_dir('benchmark_8_gpu_amp')),
        ('dtype', 'fp32'),
        ('enable_eager', True),
        ('automatic_mixed_precision', True),
        # Thread tunings to improve input-pipeline performance.
        ('datasets_num_private_threads', 14),
        ('use_tensor_lr', True),
    )
    for flag_name, flag_value in config:
        setattr(FLAGS, flag_name, flag_value)
    self._run_and_report_benchmark()
def
benchmark_8_gpu_fp16
(
self
):
def
benchmark_8_gpu_fp16
(
self
):
"""Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
"""Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
self
.
_setup
()
self
.
_setup
()
...
@@ -371,6 +388,17 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
...
@@ -371,6 +388,17 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
FLAGS
.
batch_size
=
128
FLAGS
.
batch_size
=
128
self
.
_run_and_report_benchmark
()
self
.
_run_and_report_benchmark
()
def benchmark_1_gpu_amp(self):
    """Throughput run: 1 GPU with automatic mixed precision."""
    self._setup()
    # Apply the benchmark's flag overrides onto the global FLAGS object.
    for flag_name, flag_value in (
        ('num_gpus', 1),
        ('enable_eager', True),
        ('automatic_mixed_precision', True),
        ('distribution_strategy', 'default'),
        ('model_dir', self._get_model_dir('benchmark_1_gpu_amp')),
        ('batch_size', 256),
    ):
        setattr(FLAGS, flag_name, flag_value)
    self._run_and_report_benchmark()
def
benchmark_xla_1_gpu
(
self
):
def
benchmark_xla_1_gpu
(
self
):
"""Test Keras model with XLA and 1 GPU."""
"""Test Keras model with XLA and 1 GPU."""
...
@@ -384,6 +412,19 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
...
@@ -384,6 +412,19 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
FLAGS
.
batch_size
=
128
FLAGS
.
batch_size
=
128
self
.
_run_and_report_benchmark
()
self
.
_run_and_report_benchmark
()
def benchmark_xla_1_gpu_amp(self):
    """Throughput run: XLA-compiled, 1 GPU, automatic mixed precision."""
    self._setup()
    # Apply the benchmark's flag overrides onto the global FLAGS object.
    for flag_name, flag_value in (
        ('num_gpus', 1),
        ('enable_eager', True),
        ('automatic_mixed_precision', True),
        ('enable_xla', True),
        ('distribution_strategy', 'default'),
        ('model_dir', self._get_model_dir('benchmark_xla_1_gpu_amp')),
        ('batch_size', 256),
    ):
        setattr(FLAGS, flag_name, flag_value)
    self._run_and_report_benchmark()
def
benchmark_1_gpu_fp16
(
self
):
def
benchmark_1_gpu_fp16
(
self
):
"""Test Keras model with 1 GPU and fp16."""
"""Test Keras model with 1 GPU and fp16."""
self
.
_setup
()
self
.
_setup
()
...
@@ -555,6 +596,18 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
...
@@ -555,6 +596,18 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
FLAGS
.
batch_size
=
128
*
8
# 8 GPUs
FLAGS
.
batch_size
=
128
*
8
# 8 GPUs
self
.
_run_and_report_benchmark
()
self
.
_run_and_report_benchmark
()
def benchmark_8_gpu_amp(self):
    """Throughput run: 8 GPUs with automatic mixed precision."""
    self._setup()
    # Apply the benchmark's flag overrides onto the global FLAGS object.
    for flag_name, flag_value in (
        ('num_gpus', 8),
        ('enable_eager', True),
        ('automatic_mixed_precision', True),
        ('distribution_strategy', 'default'),
        ('model_dir', self._get_model_dir('benchmark_8_gpu_amp')),
        ('batch_size', 256 * 8),  # per-GPU batch of 256 across 8 GPUs
    ):
        setattr(FLAGS, flag_name, flag_value)
    self._run_and_report_benchmark()
def
benchmark_8_gpu_tweaked
(
self
):
def
benchmark_8_gpu_tweaked
(
self
):
"""Test Keras model with manual config tuning and 8 GPUs."""
"""Test Keras model with manual config tuning and 8 GPUs."""
self
.
_setup
()
self
.
_setup
()
...
@@ -592,6 +645,19 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
...
@@ -592,6 +645,19 @@ class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
FLAGS
.
batch_size
=
128
*
8
# 8 GPUs
FLAGS
.
batch_size
=
128
*
8
# 8 GPUs
self
.
_run_and_report_benchmark
()
self
.
_run_and_report_benchmark
()
def benchmark_xla_8_gpu_amp(self):
    """Throughput run: XLA-compiled, 8 GPUs, automatic mixed precision."""
    self._setup()
    # Apply the benchmark's flag overrides onto the global FLAGS object.
    for flag_name, flag_value in (
        ('num_gpus', 8),
        ('enable_eager', True),
        ('automatic_mixed_precision', True),
        ('enable_xla', True),
        ('distribution_strategy', 'default'),
        ('model_dir', self._get_model_dir('benchmark_xla_8_gpu_amp')),
        ('batch_size', 256 * 8),  # per-GPU batch of 256 across 8 GPUs
    ):
        setattr(FLAGS, flag_name, flag_value)
    self._run_and_report_benchmark()
def
benchmark_xla_8_gpu_tweaked
(
self
):
def
benchmark_xla_8_gpu_tweaked
(
self
):
"""Test Keras model with manual config tuning, 8 GPUs, and XLA."""
"""Test Keras model with manual config tuning, 8 GPUs, and XLA."""
self
.
_setup
()
self
.
_setup
()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment