Commit bac0cbdf authored by Chen Qian, committed by A. Unique TensorFlower

Internal change

PiperOrigin-RevId: 466508813
parent 549d620f
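Each hunk below re-bases a custom optimizer onto the legacy Keras optimizer classes. Around TF 2.11, `tf.keras.optimizers.Optimizer` started pointing at the new optimizer implementation, so subclasses written against the old API (e.g. overriding `_resource_apply_dense`) must target the legacy base explicitly. A minimal hedged sketch of that pattern; the class and its hyperparameter handling are illustrative, not code from this commit:

import tensorflow as tf

class MyCustomOptimizer(tf.keras.optimizers.legacy.Optimizer):
  # Hypothetical subclass showing the old-style API that the legacy base
  # still supports.

  def __init__(self, learning_rate=0.01, name="MyCustomOptimizer", **kwargs):
    super().__init__(name, **kwargs)
    self._set_hyper("learning_rate", learning_rate)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    # Plain gradient-descent step using the stored hyperparameter.
    lr = tf.cast(self._get_hyper("learning_rate"), var.dtype)
    return var.assign_sub(lr * grad)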
@@ -21,7 +21,7 @@ import tensorflow as tf
 # pylint: disable=protected-access
-class ExponentialMovingAverage(tf.keras.optimizers.Optimizer):
+class ExponentialMovingAverage(tf.keras.optimizers.legacy.Optimizer):
   """Optimizer that computes an exponential moving average of the variables.
   Empirically it has been found that using the moving average of the trained
...
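For context, the exponential-moving-average update this optimizer maintains has the standard form below. A minimal sketch; the decay value and names are illustrative assumptions, not this file's exact implementation:

import tensorflow as tf

var = tf.Variable(1.0)               # a trained parameter
average = tf.Variable(var.value())   # its shadow EMA copy

def update_average(decay=0.999):
  # average <- decay * average + (1 - decay) * var
  average.assign(decay * average + (1.0 - decay) * var)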
@@ -22,7 +22,7 @@ import tensorflow as tf
 # pylint: disable=protected-access
-class LARS(tf.keras.optimizers.Optimizer):
+class LARS(tf.keras.optimizers.legacy.Optimizer):
   """Layer-wise Adaptive Rate Scaling for large batch training.
   Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
...
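The core idea from You et al. is a per-layer trust ratio that rescales the global learning rate. A rough sketch, assuming the usual formulation; names, defaults, and the zero-norm fallback are assumptions rather than this file's code:

import tensorflow as tf

def lars_scaled_lr(w, grad, base_lr, eeta=0.001, weight_decay=0.0):
  # Scale the global LR per layer by eeta * ||w|| / ||grad + wd * w||,
  # falling back to 1.0 when either norm is zero.
  w_norm = tf.norm(w)
  g_norm = tf.norm(grad + weight_decay * w)
  trust_ratio = tf.where(
      tf.logical_and(w_norm > 0.0, g_norm > 0.0),
      eeta * w_norm / g_norm, 1.0)
  return base_lr * trust_ratio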
@@ -20,7 +20,7 @@ from absl import logging
 import tensorflow as tf
-class AdamWeightDecay(tf.keras.optimizers.Adam):
+class AdamWeightDecay(tf.keras.optimizers.legacy.Adam):
   """Adam enables L2 weight decay and clip_by_global_norm on gradients.
   [Warning!]: Keras optimizer supports gradient clipping and has an AdamW
...
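Decoupled weight decay (AdamW-style) subtracts the decay term from the variable directly, so it never flows through Adam's moment estimates the way loss-level L2 regularization does. A simplified sketch; the helper name and factoring are assumptions:

import tensorflow as tf

def adamw_style_step(var, adam_update, learning_rate, weight_decay_rate):
  # Apply the precomputed Adam update, then decay the raw variable.
  var.assign_sub(adam_update + learning_rate * weight_decay_rate * var)

The gradient clipping mentioned in the docstring is typically done with tf.clip_by_global_norm, e.g. grads, _ = tf.clip_by_global_norm(grads, clip_norm=1.0).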
@@ -43,7 +43,7 @@ def _var_key(var):
   return var._unique_id
-class SGDTorch(tf.keras.optimizers.Optimizer):
+class SGDTorch(tf.keras.optimizers.legacy.Optimizer):
   """Optimizer that simulates the SGD module used in pytorch.
...
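PyTorch's SGD applies momentum and the learning rate in a different order than stock Keras SGD: the momentum buffer accumulates raw gradients, and the learning rate multiplies the buffer afterwards, so a learning-rate change takes effect immediately. A sketch of that ordering; variable names are illustrative, not this file's implementation:

import tensorflow as tf

def pytorch_style_sgd_step(var, grad, momentum_buf, lr, momentum=0.9):
  # PyTorch ordering: buf = momentum * buf + grad; var -= lr * buf.
  # (Stock Keras SGD folds lr into the buffer instead.)
  momentum_buf.assign(momentum * momentum_buf + grad)
  var.assign_sub(lr * momentum_buf)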