# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Register flags for optimizing performance."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import multiprocessing

from absl import flags  # pylint: disable=g-bad-import-order
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.utils.flags._conventions import help_wrap

# Map string to TensorFlow dtype
DTYPE_MAP = {
    "fp16": tf.float16,
    "bf16": tf.bfloat16,
    "fp32": tf.float32,
}


def get_tf_dtype(flags_obj):
  """Returns the TensorFlow dtype requested by the dtype-related flags."""
  if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite":
    # If the graph_rewrite is used, we build the graph with fp32, and let the
    # graph rewrite change ops to fp16.
    return tf.float32
  return DTYPE_MAP[flags_obj.dtype]
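
# Example usage (illustrative only; assumes flags defined via
# define_performance(dtype=True, fp16_implementation=True) below have already
# been parsed):
#
#   flags_obj = flags.FLAGS
#   get_tf_dtype(flags_obj)  # tf.float16 for --dtype=fp16, unless
#                            # --fp16_implementation=graph_rewrite, in which
#                            # case the graph is built in tf.float32.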


def get_loss_scale(flags_obj, default_for_fp16):
  """Returns the loss scale to use based on --loss_scale and --dtype."""
  dtype = get_tf_dtype(flags_obj)
  if flags_obj.loss_scale == "dynamic":
    return flags_obj.loss_scale
  elif flags_obj.loss_scale is not None:
    return float(flags_obj.loss_scale)
  elif dtype == tf.float32 or dtype == tf.bfloat16:
    return 1  # No loss scaling is needed for fp32 or bf16.
  else:
    assert dtype == tf.float16
    return default_for_fp16
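
# Example behaviour (illustrative only; flag combinations are hypothetical):
#
#   --dtype=fp16 --loss_scale=dynamic     -> "dynamic"
#   --dtype=fp16 --loss_scale=256         -> 256.0
#   --dtype=fp16, no --loss_scale         -> default_for_fp16 (e.g. 128)
#   --dtype=fp32 or bf16, no --loss_scale -> 1 (no scaling needed)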


def define_performance(num_parallel_calls=False,
                       inter_op=False,
                       intra_op=False,
                       synthetic_data=False,
                       max_train_steps=False,
                       dtype=False,
                       all_reduce_alg=False,
                       num_packs=False,
                       tf_gpu_thread_mode=False,
                       datasets_num_private_threads=False,
                       datasets_num_parallel_batches=False,
                       dynamic_loss_scale=False,
                       fp16_implementation=False,
                       loss_scale=False,
                       tf_data_experimental_slack=False,
                       enable_xla=False,
                       training_dataset_cache=False):
  """Register flags for specifying performance tuning arguments.

  Args:
    num_parallel_calls: Create a flag to specify parallelism of data loading.
    inter_op: Create a flag to allow specification of inter op threads.
    intra_op: Create a flag to allow specification of intra op threads.
    synthetic_data: Create a flag to allow the use of synthetic data.
    max_train_steps: Create a flag to allow specification of the maximum
      number of training steps.
    dtype: Create flags for specifying dtype.
    all_reduce_alg: If set, forces a specific algorithm for multi-GPU.
    num_packs: If set, provides the number of packs for MirroredStrategy's
      cross device ops.
    tf_gpu_thread_mode: gpu_private triggers use of a private thread pool.
    datasets_num_private_threads: Number of private threads for datasets.
    datasets_num_parallel_batches: Determines how many batches to process in
      parallel when using map and batch from tf.data.
    dynamic_loss_scale: Allow the "loss_scale" flag to take on the value
      "dynamic". Only valid if `dtype` is True.
    fp16_implementation: Create fp16_implementation flag.
    loss_scale: Controls the loss scaling, normally for mixed-precision
      training. Can only be turned on if dtype is also True.
    tf_data_experimental_slack: Determines whether to enable tf.data's
      `experimental_slack` option.
    enable_xla: Determines if XLA (auto clustering) is turned on.
    training_dataset_cache: Whether to cache the training dataset on workers.
      Typically used to improve training performance when training data is in
      remote storage and can fit into worker memory.

  Returns:
    A list of flags for core.py to mark as key flags.
  """

  key_flags = []
  if num_parallel_calls:
    flags.DEFINE_integer(
        name="num_parallel_calls",
        short_name="npc",
        default=multiprocessing.cpu_count(),
        help=help_wrap("The number of records that are  processed in parallel "
                       "during input processing. This can be optimized per "
                       "data set but for generally homogeneous data sets, "
                       "should be approximately the number of available CPU "
                       "cores. (default behavior)"))

  if inter_op:
    flags.DEFINE_integer(
        name="inter_op_parallelism_threads",
        short_name="inter",
        default=0,
        help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. "
                       "See TensorFlow config.proto for details."))

  if intra_op:
    flags.DEFINE_integer(
        name="intra_op_parallelism_threads",
        short_name="intra",
        default=0,
        help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. "
                       "See TensorFlow config.proto for details."))

  if synthetic_data:
    flags.DEFINE_bool(
        name="use_synthetic_data",
        short_name="synth",
        default=False,
        help=help_wrap(
            "If set, use fake data (zeroes) instead of a real dataset. "
            "This mode is useful for performance debugging, as it removes "
            "input processing steps, but will not learn anything."))

  if max_train_steps:
    flags.DEFINE_integer(
        name="max_train_steps",
        short_name="mts",
        default=None,
        help=help_wrap(
            "The model will stop training if the global_step reaches this "
            "value. If not set, training will run until the specified number "
            "of epochs have run as usual. It is generally recommended to set "
            "--train_epochs=1 when using this flag."))

  if dtype:
    flags.DEFINE_enum(
        name="dtype",
        short_name="dt",
        default="fp32",
        enum_values=DTYPE_MAP.keys(),
        help=help_wrap("The TensorFlow datatype used for calculations. "
                       "Variables may be cast to a higher precision on a "
                       "case-by-case basis for numerical stability."))

    loss_scale_help_text = (
        "The amount to scale the loss by when the model is run. {}. Before "
        "gradients are computed, the loss is multiplied by the loss scale, "
        "making all gradients loss_scale times larger. To adjust for this, "
        "gradients are divided by the loss scale before being applied to "
        "variables. This is mathematically equivalent to training without "
        "a loss scale, but the loss scale helps avoid some intermediate "
        "gradients from underflowing to zero. If not provided the default "
        "for fp16 is 128 and 1 for all other dtypes.{}")
    if dynamic_loss_scale:
      loss_scale_help_text = loss_scale_help_text.format(
          "This can be an int/float or the string 'dynamic'",
          " The string 'dynamic' can be used to dynamically determine the "
          "optimal loss scale during training, but currently this "
          "significantly slows down performance")
      loss_scale_validation_msg = ("loss_scale should be a positive int/float "
                                   "or the string 'dynamic'.")
    else:
      loss_scale_help_text = loss_scale_help_text.format(
          "This must be an int/float", "")
      loss_scale_validation_msg = "loss_scale should be a positive int/float."
    if loss_scale:
      flags.DEFINE_string(
          name="loss_scale",
          short_name="ls",
          default=None,
          help=help_wrap(loss_scale_help_text))

      @flags.validator(
          flag_name="loss_scale", message=loss_scale_validation_msg)
      def _check_loss_scale(loss_scale):  # pylint: disable=unused-variable
        """Validator to check the loss scale flag is valid."""
        if loss_scale is None:
          return True  # null case is handled in get_loss_scale()

        if loss_scale == "dynamic" and dynamic_loss_scale:
          return True

        try:
          loss_scale = float(loss_scale)
        except ValueError:
          return False

        return loss_scale > 0

    if fp16_implementation:
      flags.DEFINE_enum(
          name="fp16_implementation",
          default="keras",
          enum_values=("keras', 'graph_rewrite"),
          help=help_wrap(
              "When --dtype=fp16, how fp16 should be implemented. This has no "
              "impact on correctness. 'keras' uses the "
              "tf.keras.mixed_precision API. 'graph_rewrite' uses the "
              "tf.train.experimental.enable_mixed_precision_graph_rewrite "
              "API."))

      @flags.multi_flags_validator(
          ["fp16_implementation", "dtype", "loss_scale"])
      def _check_fp16_implementation(flags_dict):
        """Validator to check fp16_implementation flag is valid."""
        if (flags_dict["fp16_implementation"] == "graph_rewrite" and
            flags_dict["dtype"] != "fp16"):
          raise flags.ValidationError("--fp16_implementation should not be "
                                      "specified unless --dtype=fp16")
        return True

  if all_reduce_alg:
    flags.DEFINE_string(
        name="all_reduce_alg",
        short_name="ara",
        default=None,
        help=help_wrap("Defines the algorithm to use for performing all-reduce."
                       "When specified with MirroredStrategy for single "
                       "worker, this controls "
                       "tf.contrib.distribute.AllReduceCrossTowerOps.  When "
                       "specified with MultiWorkerMirroredStrategy, this "
                       "controls "
                       "tf.distribute.experimental.CollectiveCommunication; "
                       "valid options are `ring` and `nccl`."))

  if num_packs:
    flags.DEFINE_integer(
        name="num_packs",
        default=1,
        help=help_wrap("Sets `num_packs` in the cross device ops used in "
                       "MirroredStrategy.  For details, see "
                       "tf.distribute.NcclAllReduce."))

  if tf_gpu_thread_mode:
    flags.DEFINE_string(
        name="tf_gpu_thread_mode",
        short_name="gt_mode",
        default=None,
        help=help_wrap(
            "Whether and how the GPU device uses its own threadpool."))

    flags.DEFINE_integer(
        name="per_gpu_thread_count",
        short_name="pgtc",
        default=0,
        help=help_wrap("The number of threads to use for GPU. Only valid when "
                       "tf_gpu_thread_mode is not global."))

  if datasets_num_private_threads:
    flags.DEFINE_integer(
        name="datasets_num_private_threads",
        default=None,
        help=help_wrap(
            "Number of threads for a private threadpool created for all"
            "datasets computation.."))

  if datasets_num_parallel_batches:
    flags.DEFINE_integer(
        name="datasets_num_parallel_batches",
        default=None,
        help=help_wrap(
            "Determines how many batches to process in parallel when using "
            "map and batch from tf.data."))

  if training_dataset_cache:
    flags.DEFINE_boolean(
        name="training_dataset_cache",
        default=False,
        help=help_wrap(
            "Determines whether to cache the training dataset on workers. "
            "Typically used to improve training performance when training "
            "data is in remote storage and can fit into worker memory."))

  if tf_data_experimental_slack:
    flags.DEFINE_boolean(
        name="tf_data_experimental_slack",
        default=False,
        help=help_wrap(
            "Whether to enable tf.data's `experimental_slack` option."))

  if enable_xla:
    flags.DEFINE_boolean(
        name="enable_xla",
        default=False,
        help="Whether to enable XLA auto jit compilation")

  return key_flags
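
# Minimal usage sketch (illustrative only, not part of this module's API):
# a model's flag-definition code would typically call define_performance()
# once at import time and read the resulting values after parsing, e.g.
#
#   from absl import app
#   from absl import flags
#   from official.utils.flags import _performance
#
#   _performance.define_performance(dtype=True, loss_scale=True,
#                                   dynamic_loss_scale=True,
#                                   fp16_implementation=True)
#
#   def main(_):
#     flags_obj = flags.FLAGS
#     dtype = _performance.get_tf_dtype(flags_obj)
#     loss_scale = _performance.get_loss_scale(flags_obj,
#                                              default_for_fp16=128)
#
#   if __name__ == "__main__":
#     app.run(main)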