"examples/vscode:/vscode.git/clone" did not exist on "f5c9be3a0aef60e68e1080b45c4ddf423f868fc4"
Commit ebf268b6 authored by Yuexin Wu, committed by A. Unique TensorFlower

Correct description typos.

PiperOrigin-RevId: 372738818
parent 0fdbf1bd
@@ -154,11 +154,11 @@ class PowerAndLinearDecayLrConfig(base_config.Config):
   1) offset_step < 0, the actual learning rate equals initial_learning_rate.
   2) offset_step <= total_decay_steps * (1 - linear_decay_fraction), the
   actual learning rate equals lr * offset_step^power.
-  3) total_decay_steps * (1 - linear_decay_fraction) < offset_step <
+  3) total_decay_steps * (1 - linear_decay_fraction) <= offset_step <
   total_decay_steps, the actual learning rate equals lr * offset_step^power *
   (total_decay_steps - offset_step) / (total_decay_steps *
   linear_decay_fraction).
-  4) offset_step > total_decay_steps, the actual learning rate equals zero.
+  4) offset_step >= total_decay_steps, the actual learning rate equals zero.
 
   Attributes:
     name: The name of the learning rate schedule. Defaults to
...
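The piecewise rule spelled out in this docstring is easy to misread, so here is a minimal, self-contained Python sketch of it with the corrected boundary conditions. This illustrates the documented formula only, not the class's actual implementation; the parameter names and the clamp at step 0 are assumptions.

```python
# Minimal sketch of the piecewise schedule described above, using the
# corrected boundaries (<= and >=). Parameter names mirror the docstring's
# terminology but are assumptions, not the real signature.
def power_and_linear_decay_lr(step,
                              initial_learning_rate=1.0,
                              total_decay_steps=10000,
                              power=-0.5,
                              linear_decay_fraction=0.1,
                              offset=0):
  offset_step = step - offset
  if offset_step < 0:
    # 1) Before the schedule starts: constant initial rate.
    return initial_learning_rate
  if offset_step >= total_decay_steps:
    # 4) At or past the decay horizon: the rate is zero.
    return 0.0
  # 2) Power decay: lr * offset_step^power. Clamping to 1 keeps a negative
  # power defined at offset_step == 0; that guard is an illustrative choice.
  lr = initial_learning_rate * max(offset_step, 1)**power
  if offset_step >= total_decay_steps * (1.0 - linear_decay_fraction):
    # 3) Linear ramp to zero over the final linear_decay_fraction of steps.
    lr *= (total_decay_steps - offset_step) / (
        total_decay_steps * linear_decay_fraction)
  return lr
```

The corrected boundaries are consistent with the neighboring cases: at offset_step == total_decay_steps * (1 - linear_decay_fraction) the linear multiplier in case 3 equals 1, so cases 2 and 3 agree there, and at offset_step == total_decay_steps the case 3 multiplier reaches 0, matching case 4.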
@@ -57,7 +57,7 @@ class LrConfig(oneof.OneOfConfig):
   """Configuration for lr schedule.
 
   Attributes:
-    type: 'str', type of lr schedule to be used, on the of fields below.
+    type: 'str', type of lr schedule to be used, one of the fields below.
     constant: constant learning rate config.
     stepwise: stepwise learning rate config.
     exponential: exponential learning rate config.
@@ -86,7 +86,7 @@ class WarmupConfig(oneof.OneOfConfig):
   """Configuration for lr schedule.
 
   Attributes:
-    type: 'str', type of warmup schedule to be used, on the of fields below.
+    type: 'str', type of warmup schedule to be used, one of the fields below.
     linear: linear warmup config.
     polynomial: polynomial warmup config.
   """
...
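Both of these hunks touch `oneof.OneOfConfig` subclasses, where the `type` string names which of the sibling fields is active. As a rough illustration of that pattern (a standalone toy, not the actual `oneof` library):

```python
# Toy illustration of the oneof pattern: `type` selects exactly one of the
# candidate sub-config fields. Not the real oneof.OneOfConfig implementation.
import dataclasses
from typing import Optional


@dataclasses.dataclass
class ConstantLrConfig:
  learning_rate: float = 0.1


@dataclasses.dataclass
class ExponentialLrConfig:
  initial_learning_rate: float = 0.1
  decay_rate: float = 0.96


@dataclasses.dataclass
class LrConfig:
  type: str = 'constant'  # One of the field names below.
  constant: Optional[ConstantLrConfig] = None
  exponential: Optional[ExponentialLrConfig] = None

  def get(self):
    # Returns the sub-config named by `type`.
    return getattr(self, self.type)


cfg = LrConfig(type='exponential',
               exponential=ExponentialLrConfig(decay_rate=0.9))
print(cfg.get())  # ExponentialLrConfig(initial_learning_rate=0.1, decay_rate=0.9)
```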
@@ -205,11 +205,11 @@ class PowerAndLinearDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
   1) offset_step < 0, the actual learning rate equals initial_learning_rate.
   2) offset_step <= total_decay_steps * (1 - linear_decay_fraction), the
   actual learning rate equals lr * offset_step^power.
-  3) total_decay_steps * (1 - linear_decay_fraction) < offset_step <
+  3) total_decay_steps * (1 - linear_decay_fraction) <= offset_step <
   total_decay_steps, the actual learning rate equals lr * offset_step^power *
   (total_decay_steps - offset_step) / (total_decay_steps *
   linear_decay_fraction).
-  4) offset_step > total_decay_steps, the actual learning rate equals zero.
+  4) offset_step >= total_decay_steps, the actual learning rate equals zero.
   """
 
   def __init__(self,
...
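Since `PowerAndLinearDecay` subclasses `tf.keras.optimizers.schedules.LearningRateSchedule`, it can be passed directly as an optimizer's learning rate. A sketch of that usage follows; the constructor arguments are guesses based on the docstring's terminology, since the actual signature is elided in this view.

```python
import tensorflow as tf

# PowerAndLinearDecay is defined in the file edited above; the argument
# names here are assumed from the docstring, not the real signature.
schedule = PowerAndLinearDecay(
    initial_learning_rate=1e-3,
    total_decay_steps=10000,
    power=-0.5,
    linear_decay_fraction=0.1)

# Any LearningRateSchedule can be handed to a Keras optimizer this way.
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)
```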