# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions and classes related to optimization (weight updates)."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re

from absl import logging
import tensorflow as tf
import tensorflow_addons.optimizers as tfa_optimizers


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Applies a warmup schedule on a given learning rate decay schedule."""

  def __init__(self,
               initial_learning_rate,
               decay_schedule_fn,
               warmup_steps,
               power=1.0,
               name=None):
    super(WarmUp, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.warmup_steps = warmup_steps
    self.power = power
    self.decay_schedule_fn = decay_schedule_fn
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or 'WarmUp') as name:
      # Implements polynomial warmup: while `global_step < warmup_steps`, the
      # learning rate is `(global_step / warmup_steps) ** power * init_lr`
      # (a linear ramp when `power == 1.0`).
      global_step_float = tf.cast(step, tf.float32)
      warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
      warmup_percent_done = global_step_float / warmup_steps_float
      warmup_learning_rate = (
          self.initial_learning_rate *
          tf.math.pow(warmup_percent_done, self.power))
      return tf.cond(
          global_step_float < warmup_steps_float,
          lambda: warmup_learning_rate,
          lambda: self.decay_schedule_fn(step),
          name=name)

  def get_config(self):
    return {
        'initial_learning_rate': self.initial_learning_rate,
        'decay_schedule_fn': self.decay_schedule_fn,
        'warmup_steps': self.warmup_steps,
        'power': self.power,
        'name': self.name
    }
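

# Usage sketch for `WarmUp` (illustrative values; the rates and step counts
# below are assumptions, not defaults of this module):
#
#   decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
#       initial_learning_rate=1e-4, decay_steps=10000, end_learning_rate=0.0)
#   schedule = WarmUp(
#       initial_learning_rate=1e-4,
#       decay_schedule_fn=decay_fn,
#       warmup_steps=1000)
#   # schedule(step) ramps linearly (power=1.0) up to 1e-4 over the first
#   # 1000 steps, then follows `decay_fn` for the remaining steps.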


def create_optimizer(init_lr,
                     num_train_steps,
                     num_warmup_steps,
                     end_lr=0.0,
                     optimizer_type='adamw'):
  """Creates an optimizer with learning rate schedule."""
  # Implements linear decay of the learning rate.
  lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
      initial_learning_rate=init_lr,
      decay_steps=num_train_steps,
      end_learning_rate=end_lr)
  if num_warmup_steps:
    lr_schedule = WarmUp(
        initial_learning_rate=init_lr,
        decay_schedule_fn=lr_schedule,
        warmup_steps=num_warmup_steps)

  if optimizer_type == 'adamw':
    logging.info('using AdamW optimizer')
    optimizer = AdamWeightDecay(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=['layer_norm', 'bias'])
  elif optimizer_type == 'lamb':
    logging.info('using LAMB optimizer')
    optimizer = tfa_optimizers.LAMB(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=['layer_norm', 'bias'])
  else:
    raise ValueError('Unsupported optimizer type: %s' % optimizer_type)

  return optimizer
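

# Example call (a sketch; the argument values are illustrative, not
# recommendations from this module):
#
#   optimizer = create_optimizer(
#       init_lr=2e-5,
#       num_train_steps=100000,
#       num_warmup_steps=10000,
#       optimizer_type='adamw')
#   # -> AdamWeightDecay with linear warmup over the first 10,000 steps,
#   #    then polynomial decay to end_lr=0.0 by step 100,000.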


class AdamWeightDecay(tf.keras.optimizers.Adam):
  """Adam enables L2 weight decay and clip_by_global_norm on gradients.

  Just adding the square of the weights to the loss function is *not* the
  correct way of using L2 regularization/weight decay with Adam, since that will
  interact with the m and v parameters in strange ways.

  Instead we want to decay the weights in a manner that doesn't interact with
  the m/v parameters. This is equivalent to adding the square of the weights to
  the loss with plain (non-momentum) SGD.
  """

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               weight_decay_rate=0.0,
               include_in_weight_decay=None,
               exclude_from_weight_decay=None,
               name='AdamWeightDecay',
               **kwargs):
    super(AdamWeightDecay, self).__init__(learning_rate, beta_1, beta_2,
                                          epsilon, amsgrad, name, **kwargs)
    self.weight_decay_rate = weight_decay_rate
    self._include_in_weight_decay = include_in_weight_decay
    self._exclude_from_weight_decay = exclude_from_weight_decay

  @classmethod
  def from_config(cls, config):
    """Creates an optimizer from its config with WarmUp custom object."""
    custom_objects = {'WarmUp': WarmUp}
    return super(AdamWeightDecay, cls).from_config(
        config, custom_objects=custom_objects)

  def _prepare_local(self, var_device, var_dtype, apply_state):
    super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,
                                                apply_state)
    apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
        self.weight_decay_rate, name='adam_weight_decay_rate')

  def _decay_weights_op(self, var, learning_rate, apply_state):
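    # Decoupled weight decay (see the class docstring): shrink the variable
    # directly by `learning_rate * weight_decay_rate * var`, leaving Adam's
    # m/v statistics untouched.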
    do_decay = self._do_use_weight_decay(var.name)
    if do_decay:
      return var.assign_sub(
          learning_rate * var *
          apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
          use_locking=self._use_locking)
    return tf.no_op()

  def apply_gradients(self,
                      grads_and_vars,
                      name=None,
                      experimental_aggregate_gradients=True):
    grads, tvars = list(zip(*grads_and_vars))
    if experimental_aggregate_gradients:
      # When experimental_aggregate_gradients = False, apply_gradients() no
      # longer implicitly all-reduces gradients; users all-reduce the gradients
      # themselves and pass in the already-reduced grads_and_vars. For now,
      # clip_by_global_norm is kept before the explicit allreduce to keep the
      # math the same as the TF 1 and pre-TF 2.2 implementations.
      (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    return super(AdamWeightDecay, self).apply_gradients(
        zip(grads, tvars),
        name=name,
        experimental_aggregate_gradients=experimental_aggregate_gradients)

  def _get_lr(self, var_device, var_dtype, apply_state):
    """Retrieves the learning rate with the given state."""
    if apply_state is None:
      return self._decayed_lr_t[var_dtype], {}

    apply_state = apply_state or {}
    coefficients = apply_state.get((var_device, var_dtype))
    if coefficients is None:
      coefficients = self._fallback_apply_state(var_device, var_dtype)
      apply_state[(var_device, var_dtype)] = coefficients

    return coefficients['lr_t'], dict(apply_state=apply_state)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
    decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay,
                   self)._resource_apply_dense(grad, var, **kwargs)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
    decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay,
                   self)._resource_apply_sparse(grad, var, indices, **kwargs)

  def get_config(self):
    config = super(AdamWeightDecay, self).get_config()
    config.update({
        'weight_decay_rate': self.weight_decay_rate,
    })
    return config

  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if self.weight_decay_rate == 0:
      return False

    if self._include_in_weight_decay:
      for r in self._include_in_weight_decay:
        if re.search(r, param_name) is not None:
          return True

    if self._exclude_from_weight_decay:
      for r in self._exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True
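

# Behavior sketch for the weight decay filters above (the variable names are
# illustrative, not taken from this module):
#
#   opt = AdamWeightDecay(
#       learning_rate=1e-4,
#       weight_decay_rate=0.01,
#       exclude_from_weight_decay=['layer_norm', 'bias'])
#   opt._do_use_weight_decay('encoder/layer_0/output/layer_norm/gamma:0')
#   # -> False: the name matches 'layer_norm' via re.search, so
#   #    _decay_weights_op skips this variable.
#   opt._do_use_weight_decay('encoder/layer_0/output/dense/kernel:0')
#   # -> True: no exclude pattern matches, so decay at rate 0.01 applies.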