# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Register flags for optimizing performance."""

import multiprocessing

from absl import flags  # pylint: disable=g-bad-import-order
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.utils.flags._conventions import help_wrap

# Map string to TensorFlow dtype
DTYPE_MAP = {
    "fp16": tf.float16,
    "bf16": tf.bfloat16,
    "fp32": tf.float32,
}


def get_tf_dtype(flags_obj):
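  """Returns the tf.dtype corresponding to the --dtype flag."""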
  if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite":
    # If the graph_rewrite is used, we build the graph with fp32, and let the
    # graph rewrite change ops to fp16.
    return tf.float32
  return DTYPE_MAP[flags_obj.dtype]


def get_loss_scale(flags_obj, default_for_fp16):
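  """Returns the loss scale from --loss_scale, or a dtype-based default."""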
  dtype = get_tf_dtype(flags_obj)
  if flags_obj.loss_scale == "dynamic":
    return flags_obj.loss_scale
  elif flags_obj.loss_scale is not None:
    return float(flags_obj.loss_scale)
  elif dtype == tf.float32 or dtype == tf.bfloat16:
    return 1  # No loss scaling is needed for fp32 or bf16
  else:
    assert dtype == tf.float16
    return default_for_fp16


def define_performance(num_parallel_calls=False,
                       inter_op=False,
                       intra_op=False,
                       synthetic_data=False,
                       max_train_steps=False,
                       dtype=False,
                       all_reduce_alg=False,
                       num_packs=False,
                       tf_gpu_thread_mode=False,
                       datasets_num_private_threads=False,
                       datasets_num_parallel_batches=False,
                       fp16_implementation=False,
                       loss_scale=False,
                       tf_data_experimental_slack=False,
                       enable_xla=False,
                       training_dataset_cache=False):
  """Register flags for specifying performance tuning arguments.

  Args:
    num_parallel_calls: Create a flag to specify parallelism of data loading.
    inter_op: Create a flag to allow specification of inter op threads.
    intra_op: Create a flag to allow specification of intra op threads.
    synthetic_data: Create a flag to allow the use of synthetic data.
    max_train_steps: Create a flag to allow specification of maximum number of
      training steps.
    dtype: Create flags for specifying dtype.
    all_reduce_alg: If set, forces a specific algorithm for multi-gpu.
    num_packs: If set, provides the number of packs for MirroredStrategy's
      cross device ops.
    tf_gpu_thread_mode: gpu_private triggers use of a private thread pool.
    datasets_num_private_threads: Number of private threads for datasets.
    datasets_num_parallel_batches: Determines how many batches to process in
      parallel when using map and batch from tf.data.
    fp16_implementation: Create fp16_implementation flag.
    loss_scale: Controls the loss scaling, normally for mixed-precision
      training. Can only be turned on if dtype is also True.
    tf_data_experimental_slack: Determines whether to enable tf.data's
      `experimental_slack` option.
    enable_xla: Determines if XLA (auto clustering) is turned on.
    training_dataset_cache: Whether to cache the training dataset on workers.
      Typically used to improve training performance when training data is in
      remote storage and can fit into worker memory.

  Returns:
    A list of flags for core.py to mark as key flags.
  """

  key_flags = []
  if num_parallel_calls:
    flags.DEFINE_integer(
        name="num_parallel_calls",
        short_name="npc",
        default=multiprocessing.cpu_count(),
        help=help_wrap("The number of records that are  processed in parallel "
                       "during input processing. This can be optimized per "
                       "data set but for generally homogeneous data sets, "
                       "should be approximately the number of available CPU "
                       "cores. (default behavior)"))

  if inter_op:
    flags.DEFINE_integer(
        name="inter_op_parallelism_threads",
        short_name="inter",
        default=0,
        help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. "
                       "See TensorFlow config.proto for details."))

  if intra_op:
    flags.DEFINE_integer(
        name="intra_op_parallelism_threads",
        short_name="intra",
        default=0,
        help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. "
                       "See TensorFlow config.proto for details."))

  if synthetic_data:
    flags.DEFINE_bool(
        name="use_synthetic_data",
        short_name="synth",
        default=False,
        help=help_wrap(
            "If set, use fake data (zeroes) instead of a real dataset. "
            "This mode is useful for performance debugging, as it removes "
            "input processing steps, but will not learn anything."))

  if max_train_steps:
    flags.DEFINE_integer(
        name="max_train_steps",
        short_name="mts",
        default=None,
        help=help_wrap(
            "The model will stop training if the global_step reaches this "
            "value. If not set, training will run until the specified number "
            "of epochs have run as usual. It is generally recommended to set "
            "--train_epochs=1 when using this flag."))

  if dtype:
    flags.DEFINE_enum(
        name="dtype",
        short_name="dt",
        default="fp32",
        enum_values=DTYPE_MAP.keys(),
        help=help_wrap("The TensorFlow datatype used for calculations. "
                       "For 16-bit dtypes, variables and certain ops will "
                       "still be float32 for numeric stability."))

    if loss_scale:
      flags.DEFINE_string(
          name="loss_scale",
          short_name="ls",
          default=None,
          help=help_wrap(
              "The amount to scale the loss by when --dtype=fp16. This can be "
              "an int/float or the string 'dynamic'. Before gradients are "
              "computed, the loss is multiplied by the loss scale, making all "
              "gradients loss_scale times larger. To adjust for this, "
              "gradients are divided by the loss scale before being applied to "
              "variables. This is mathematically equivalent to training "
              "without a loss scale, but the loss scale helps avoid some "
              "intermediate gradients from underflowing to zero. The default "
              "is 'dynamic', which dynamic determines the optimal loss scale "
              "during training."))

      # pylint: disable=unused-variable
      @flags.validator(
          flag_name="loss_scale",
          message="loss_scale should be a positive int/float or the string "
                  "'dynamic'.")
      def _check_loss_scale(loss_scale):
        """Validator to check the loss scale flag is valid."""
        if loss_scale is None:
          return True  # null case is handled in get_loss_scale()

        if loss_scale == "dynamic":
          return True

        try:
          loss_scale = float(loss_scale)
        except ValueError:
          return False

        return loss_scale > 0
      # pylint: enable=unused-variable

    if fp16_implementation:
      flags.DEFINE_enum(
          name="fp16_implementation",
          default="keras",
          enum_values=("keras', 'graph_rewrite"),
          help=help_wrap(
              "When --dtype=fp16, how fp16 should be implemented. This has no "
              "impact on correctness. 'keras' uses the "
              "tf.keras.mixed_precision API. 'graph_rewrite' uses the "
              "tf.compat.v1.mixed_precision."
              "enable_mixed_precision_graph_rewrite API."))

      @flags.multi_flags_validator(
          ["fp16_implementation", "dtype", "loss_scale"])
      def _check_fp16_implementation(flags_dict):
        """Validator to check fp16_implementation flag is valid."""
        if (flags_dict["fp16_implementation"] == "graph_rewrite" and
            flags_dict["dtype"] != "fp16"):
          raise flags.ValidationError("--fp16_implementation should not be "
                                      "specified unless --dtype=fp16")
        return True

  if all_reduce_alg:
    flags.DEFINE_string(
        name="all_reduce_alg",
        short_name="ara",
        default=None,
        help=help_wrap("Defines the algorithm to use for performing all-reduce."
                       "When specified with MirroredStrategy for single "
                       "worker, this controls "
                       "tf.contrib.distribute.AllReduceCrossTowerOps.  When "
                       "specified with MultiWorkerMirroredStrategy, this "
                       "controls "
                       "tf.distribute.experimental.CollectiveCommunication; "
                       "valid options are `ring` and `nccl`."))

  if num_packs:
    flags.DEFINE_integer(
        name="num_packs",
        default=1,
        help=help_wrap("Sets `num_packs` in the cross device ops used in "
                       "MirroredStrategy.  For details, see "
                       "tf.distribute.NcclAllReduce."))

  if tf_gpu_thread_mode:
    flags.DEFINE_string(
        name="tf_gpu_thread_mode",
        short_name="gt_mode",
        default=None,
        help=help_wrap(
            "Whether and how the GPU device uses its own threadpool."))

    flags.DEFINE_integer(
        name="per_gpu_thread_count",
        short_name="pgtc",
        default=0,
        help=help_wrap("The number of threads to use for GPU. Only valid when "
                       "tf_gpu_thread_mode is not global."))

  if datasets_num_private_threads:
    flags.DEFINE_integer(
        name="datasets_num_private_threads",
        default=None,
        help=help_wrap(
            "Number of threads for a private threadpool created for all"
Hongkun Yu's avatar
Hongkun Yu committed
262
            "datasets computation.."))

  if datasets_num_parallel_batches:
    flags.DEFINE_integer(
        name="datasets_num_parallel_batches",
        default=None,
        help=help_wrap(
            "Determines how many batches to process in parallel when using "
            "map and batch from tf.data."))

  if training_dataset_cache:
    flags.DEFINE_boolean(
        name="training_dataset_cache",
        default=False,
        help=help_wrap(
            "Determines whether to cache the training dataset on workers. "
            "Typically used to improve training performance when training "
            "data is in remote storage and can fit into worker memory."))

  if tf_data_experimental_slack:
    flags.DEFINE_boolean(
        name="tf_data_experimental_slack",
        default=False,
        help=help_wrap(
            "Whether to enable tf.data's `experimental_slack` option."))

  if enable_xla:
    flags.DEFINE_boolean(
        name="enable_xla",
        default=False,
        help="Whether to enable XLA auto jit compilation")

  return key_flags
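

# Illustrative usage (a minimal sketch, not part of this module): a training
# binary typically registers these flags before parsing argv and then reads
# the dtype/loss-scale helpers from its main(). The `absl.app` wiring below is
# an assumption for illustration only.
#
#   from absl import app, flags
#   from official.utils.flags import _performance
#
#   _performance.define_performance(dtype=True, loss_scale=True,
#                                   fp16_implementation=True)
#
#   def main(_):
#     flags_obj = flags.FLAGS
#     dtype = _performance.get_tf_dtype(flags_obj)
#     loss_scale = _performance.get_loss_scale(flags_obj,
#                                              default_for_fp16="dynamic")
#     ...
#
#   if __name__ == "__main__":
#     app.run(main)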