Commit 7f0ee4cb authored by Yuexin Wu, committed by A. Unique TensorFlower

Correct description typos.

PiperOrigin-RevId: 372738818
parent afe4802e
@@ -154,11 +154,11 @@ class PowerAndLinearDecayLrConfig(base_config.Config):
 1) offset_step < 0, the actual learning rate equals initial_learning_rate.
 2) offset_step <= total_decay_steps * (1 - linear_decay_fraction), the
 actual learning rate equals lr * offset_step^power.
-3) total_decay_steps * (1 - linear_decay_fraction) < offset_step <
+3) total_decay_steps * (1 - linear_decay_fraction) <= offset_step <
 total_decay_steps, the actual learning rate equals lr * offset_step^power *
 (total_decay_steps - offset_step) / (total_decay_steps *
 linear_decay_fraction).
-4) offset_step > total_decay_steps, the actual learning rate equals zero.
+4) offset_step >= total_decay_steps, the actual learning rate equals zero.
 Attributes:
 name: The name of the learning rate schedule. Defaults to
@@ -57,7 +57,7 @@ class LrConfig(oneof.OneOfConfig):
 """Configuration for lr schedule.
 Attributes:
-type: 'str', type of lr schedule to be used, on the of fields below.
+type: 'str', type of lr schedule to be used, one of the fields below.
 constant: constant learning rate config.
 stepwise: stepwise learning rate config.
 exponential: exponential learning rate config.
@@ -86,7 +86,7 @@ class WarmupConfig(oneof.OneOfConfig):
 """Configuration for lr schedule.
 Attributes:
-type: 'str', type of warmup schedule to be used, on the of fields below.
+type: 'str', type of warmup schedule to be used, one of the fields below.
 linear: linear warmup config.
 polynomial: polynomial warmup config.
 """
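As an aside on the oneof pattern used by LrConfig and WarmupConfig above: the `type` field names which one of the sibling fields is read, and the remaining fields are ignored. The sketch below shows the idea as a plain override dict; the parameter names inside 'stepwise' are assumptions for illustration only and may not match the real stepwise learning rate config.

```python
# Hypothetical override, for illustration only: 'type' selects which oneof
# field is active; the other schedule fields are ignored.
lr_override = {
    'type': 'stepwise',  # one of the documented fields: constant, stepwise, exponential, ...
    'stepwise': {
        # Assumed parameter names; check the actual stepwise lr config class.
        'boundaries': [10_000, 20_000],
        'values': [1e-3, 1e-4, 1e-5],
    },
}
```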
@@ -205,11 +205,11 @@ class PowerAndLinearDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
 1) offset_step < 0, the actual learning rate equals initial_learning_rate.
 2) offset_step <= total_decay_steps * (1 - linear_decay_fraction), the
 actual learning rate equals lr * offset_step^power.
-3) total_decay_steps * (1 - linear_decay_fraction) < offset_step <
+3) total_decay_steps * (1 - linear_decay_fraction) <= offset_step <
 total_decay_steps, the actual learning rate equals lr * offset_step^power *
 (total_decay_steps - offset_step) / (total_decay_steps *
 linear_decay_fraction).
-4) offset_step > total_decay_steps, the actual learning rate equals zero.
+4) offset_step >= total_decay_steps, the actual learning rate equals zero.
 """
 def __init__(self,
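To make the four cases listed in the corrected docstring concrete, here is a minimal pure-Python sketch that transcribes them directly. It is only an illustration of the documented formula, not the Model Garden implementation (the real PowerAndLinearDecay operates on tensors as a tf.keras.optimizers.schedules.LearningRateSchedule); the helper name and example numbers below are made up.

```python
def power_and_linear_decay(offset_step, initial_learning_rate,
                           total_decay_steps, power, linear_decay_fraction):
  """Transcribes the four documented cases; illustrative only."""
  linear_start = total_decay_steps * (1.0 - linear_decay_fraction)
  if offset_step < 0:
    # Case 1: before the offset, stay at the initial rate.
    return initial_learning_rate
  if offset_step <= linear_start:
    # Case 2: pure power decay (e.g. power = -0.5 gives rsqrt decay).
    # Note: with a negative power this expression is undefined at
    # offset_step == 0, which is why such schedules are typically paired
    # with a warmup.
    return initial_learning_rate * offset_step**power
  if offset_step < total_decay_steps:
    # Case 3: power decay multiplied by a linear ramp down to zero.
    return (initial_learning_rate * offset_step**power *
            (total_decay_steps - offset_step) /
            (total_decay_steps * linear_decay_fraction))
  # Case 4: at or past total_decay_steps, the rate is zero.
  return 0.0


# Example: rsqrt decay over the first 90% of 100k steps, then a linear tail.
for step in (50, 50_000, 95_000, 100_000):
  print(step, power_and_linear_decay(step, 1.0, 100_000, -0.5, 0.1))
```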