optimization.py 9.44 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re

22
from absl import logging
Hongkun Yu's avatar
Hongkun Yu committed
23
import gin
Hongkun Yu's avatar
Hongkun Yu committed
24
import tensorflow as tf
25
import tensorflow_addons.optimizers as tfa_optimizers
26
27
28


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Applies a warmup schedule on a given learning rate decay schedule.

  For the first `warmup_steps` steps the learning rate grows from 0 to
  `initial_learning_rate` following a polynomial of degree `power`; after
  that, `decay_schedule_fn` takes over.
  """

  def __init__(self,
               initial_learning_rate,
               decay_schedule_fn,
               warmup_steps,
               power=1.0,
               name=None):
    """Initializes the warmup schedule.

    Args:
      initial_learning_rate: Learning rate reached at the end of warmup.
      decay_schedule_fn: Schedule applied once warmup has finished.
      warmup_steps: Number of steps over which to warm up.
      power: Exponent of the warmup polynomial.
      name: Optional name scope for the schedule's ops.
    """
    super(WarmUp, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_schedule_fn = decay_schedule_fn
    self.warmup_steps = warmup_steps
    self.power = power
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or 'WarmUp') as name:
      # Implements polynomial warmup: while global_step < warmup_steps the
      # learning rate is `(global_step / warmup_steps) ** power * init_lr`.
      step_float = tf.cast(step, tf.float32)
      warmup_float = tf.cast(self.warmup_steps, tf.float32)
      fraction_done = step_float / warmup_float
      warmup_lr = self.initial_learning_rate * tf.math.pow(
          fraction_done, self.power)
      return tf.cond(
          step_float < warmup_float,
          lambda: warmup_lr,
          lambda: self.decay_schedule_fn(step),
          name=name)

  def get_config(self):
    """Returns the constructor arguments as a serializable dict."""
    return {
        'initial_learning_rate': self.initial_learning_rate,
        'decay_schedule_fn': self.decay_schedule_fn,
        'warmup_steps': self.warmup_steps,
        'power': self.power,
        'name': self.name
    }


Hongkun Yu's avatar
Hongkun Yu committed
70
@gin.configurable
def create_optimizer(init_lr,
                     num_train_steps,
                     num_warmup_steps,
                     end_lr=0.0,
                     optimizer_type='adamw'):
  """Creates an optimizer with learning rate schedule.

  Args:
    init_lr: Peak learning rate reached at the end of warmup.
    num_train_steps: Total number of training steps; the learning rate decays
      linearly from `init_lr` to `end_lr` over this many steps.
    num_warmup_steps: Number of warmup steps; if falsy, no warmup is applied.
    end_lr: Final learning rate at the end of the polynomial decay.
    optimizer_type: Either 'adamw' or 'lamb'.

  Returns:
    An optimizer instance (`AdamWeightDecay` or `tfa_optimizers.LAMB`) with
    the configured learning rate schedule.

  Raises:
    ValueError: If `optimizer_type` is not 'adamw' or 'lamb'.
  """
  # Implements linear decay of the learning rate.
  lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
      initial_learning_rate=init_lr,
      decay_steps=num_train_steps,
      end_learning_rate=end_lr)
  if num_warmup_steps:
    lr_schedule = WarmUp(
        initial_learning_rate=init_lr,
        decay_schedule_fn=lr_schedule,
        warmup_steps=num_warmup_steps)

  if optimizer_type == 'adamw':
    logging.info('using Adamw optimizer')
    optimizer = AdamWeightDecay(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
  elif optimizer_type == 'lamb':
    logging.info('using Lamb optimizer')
    optimizer = tfa_optimizers.LAMB(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
  else:
    # Fail loudly on unknown optimizer names. Note: a single formatted
    # message, not two positional args (which ValueError renders as a tuple).
    raise ValueError('Unsupported optimizer type: %s' % optimizer_type)

  return optimizer


class AdamWeightDecay(tf.keras.optimizers.Adam):
  """Adam enables L2 weight decay and clip_by_global_norm on gradients.

  Just adding the square of the weights to the loss function is *not* the
  correct way of using L2 regularization/weight decay with Adam, since that
  will interact with the m and v parameters in strange ways.

  Instead we want to decay the weights in a manner that doesn't interact with
  the m/v parameters. This is equivalent to adding the square of the weights
  to the loss with plain (non-momentum) SGD.
  """

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               weight_decay_rate=0.0,
               include_in_weight_decay=None,
               exclude_from_weight_decay=None,
               gradient_clip_norm=1.0,
               name='AdamWeightDecay',
               **kwargs):
    """Initializes the optimizer.

    Args:
      learning_rate: A float or a learning rate schedule.
      beta_1: Exponential decay rate for the 1st moment estimates.
      beta_2: Exponential decay rate for the 2nd moment estimates.
      epsilon: Small constant for numerical stability.
      amsgrad: Whether to apply the AMSGrad variant of Adam.
      weight_decay_rate: Rate of decoupled weight decay; 0 disables decay.
      include_in_weight_decay: Optional list of regex patterns; variables
        matching any pattern are always decayed (takes precedence over
        `exclude_from_weight_decay`).
      exclude_from_weight_decay: Optional list of regex patterns; variables
        matching any pattern are not decayed.
      gradient_clip_norm: Global norm used to clip gradients in
        `apply_gradients`; values <= 0 disable clipping.
      name: Name for the optimizer.
      **kwargs: Additional arguments forwarded to `tf.keras.optimizers.Adam`.
    """
    super(AdamWeightDecay, self).__init__(learning_rate, beta_1, beta_2,
                                          epsilon, amsgrad, name, **kwargs)
    self.weight_decay_rate = weight_decay_rate
    self.gradient_clip_norm = gradient_clip_norm
    self._include_in_weight_decay = include_in_weight_decay
    self._exclude_from_weight_decay = exclude_from_weight_decay
    logging.info('gradient_clip_norm=%f', gradient_clip_norm)

  @classmethod
  def from_config(cls, config):
    """Creates an optimizer from its config with WarmUp custom object."""
    custom_objects = {'WarmUp': WarmUp}
    return super(AdamWeightDecay, cls).from_config(
        config, custom_objects=custom_objects)

  def _prepare_local(self, var_device, var_dtype, apply_state):
    """Adds the weight-decay constant to the per-(device, dtype) state."""
    super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,
                                                apply_state)
    apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
        self.weight_decay_rate, name='adam_weight_decay_rate')

  def _decay_weights_op(self, var, learning_rate, apply_state):
    """Returns the decoupled weight-decay update op for `var` (or a no-op)."""
    do_decay = self._do_use_weight_decay(var.name)
    if do_decay:
      return var.assign_sub(
          learning_rate * var *
          apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
          use_locking=self._use_locking)
    return tf.no_op()

  def apply_gradients(self,
                      grads_and_vars,
                      name=None,
                      experimental_aggregate_gradients=True):
    """Applies gradients, clipping them by global norm when configured."""
    grads, tvars = list(zip(*grads_and_vars))
    if experimental_aggregate_gradients and self.gradient_clip_norm > 0.0:
      # when experimental_aggregate_gradients = False, apply_gradients() no
      # longer implicitly allreduce gradients, users manually allreduce gradient
      # and passed the allreduced grads_and_vars. For now, the
      # clip_by_global_norm will be moved to before the explicit allreduce to
      # keep the math the same as TF 1 and pre TF 2.2 implementation.
      # Fix: clip with the configured norm rather than a hard-coded 1.0,
      # which silently ignored any non-default `gradient_clip_norm`.
      (grads, _) = tf.clip_by_global_norm(
          grads, clip_norm=self.gradient_clip_norm)
    return super(AdamWeightDecay, self).apply_gradients(
        zip(grads, tvars),
        name=name,
        experimental_aggregate_gradients=experimental_aggregate_gradients)

  def _get_lr(self, var_device, var_dtype, apply_state):
    """Retrieves the learning rate with the given state."""
    if apply_state is None:
      return self._decayed_lr_t[var_dtype], {}

    apply_state = apply_state or {}
    coefficients = apply_state.get((var_device, var_dtype))
    if coefficients is None:
      coefficients = self._fallback_apply_state(var_device, var_dtype)
      apply_state[(var_device, var_dtype)] = coefficients

    return coefficients['lr_t'], dict(apply_state=apply_state)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    # As the weight decay doesn't take any tensors from forward pass as inputs,
    # add a control dependency here to make sure it happens strictly in the
    # backward pass.
    # TODO(b/171088214): Remove it after the control dependency in
    # nested function is fixed.
    with tf.control_dependencies([tf.identity(grad)]):
      lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
      decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay,
                   self)._resource_apply_dense(grad, var, **kwargs)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    # As the weight decay doesn't take any tensors from forward pass as inputs,
    # add a control dependency here to make sure it happens strictly in the
    # backward pass.
    # TODO(b/171088214): Remove it after the control dependency in
    # nested function is fixed.
    with tf.control_dependencies([tf.identity(grad)]):
      lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
      decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay,
                   self)._resource_apply_sparse(grad, var, indices, **kwargs)

  def get_config(self):
    """Returns the optimizer config, including weight-decay settings.

    Previously only `weight_decay_rate` was serialized; the clip norm and
    the include/exclude patterns were silently dropped, so a deserialized
    optimizer did not match the original. All constructor state is now
    round-tripped (each key is accepted by `__init__`).
    """
    config = super(AdamWeightDecay, self).get_config()
    config.update({
        'weight_decay_rate': self.weight_decay_rate,
        'include_in_weight_decay': self._include_in_weight_decay,
        'exclude_from_weight_decay': self._exclude_from_weight_decay,
        'gradient_clip_norm': self.gradient_clip_norm,
    })
    return config

  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if self.weight_decay_rate == 0:
      return False

    # Include patterns win over exclude patterns.
    if self._include_in_weight_decay:
      for r in self._include_in_weight_decay:
        if re.search(r, param_name) is not None:
          return True

    if self._exclude_from_weight_decay:
      for r in self._exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True