Commit e2a31b15 authored by A. Unique TensorFlower

Merge pull request #9575 from SamuelMarks:args-for-google-style-docstrings-official

PiperOrigin-RevId: 348853056
parents 584b5f29 90979a21
@@ -52,7 +52,7 @@ def _create_causal_attention_mask(
 We then flip the matrix values in order to match the representation where
 real values are 1s.
-Arguments:
+Args:
 seq_length: int, The length of each sequence.
 memory_length: int, The length of memory blocks.
 dtype: dtype of the mask.
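For context on the docstring above: a mask of this shape, where 1s mark attendable positions across both the memory block and the causal (lower-triangular) region, can be sketched with `tf.linalg.band_part`. This is a minimal illustration, not the repository's implementation:

```python
import tensorflow as tf

def causal_attention_mask_sketch(seq_length, memory_length, dtype=tf.float32):
  # Keep the lower triangle plus `memory_length` extra columns on the left:
  # position i may attend to every memory slot and to positions <= i.
  # 1s mark real (attendable) entries, matching the docstring's convention.
  return tf.linalg.band_part(
      tf.ones([seq_length, seq_length + memory_length], dtype=dtype),
      -1, memory_length)
```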
@@ -392,7 +392,7 @@ class RelativePositionEncoding(tf.keras.layers.Layer):
 def call(self, pos_seq, batch_size=None):
   """Implements call() for the layer.
-Arguments:
+Args:
 pos_seq: A 1-D `Tensor`
 batch_size: The optionally provided batch size that tiles the relative
 positional encoding.
......
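A hedged usage sketch for the `call()` documented above; the `hidden_size` constructor argument and the Transformer-XL-style descending position sequence are assumptions, not taken from this diff:

```python
import tensorflow as tf

seq_length, memory_length, batch_size = 128, 128, 32
# Constructor argument name is assumed.
rel_encoding = RelativePositionEncoding(hidden_size=768)
# Descending relative positions over memory + current segment (assumed).
pos_seq = tf.range(seq_length + memory_length, -seq_length, -1.0)
pos_emb = rel_encoding(pos_seq, batch_size=batch_size)  # tiled per batch
```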
@@ -30,7 +30,7 @@ class BigBirdEncoder(tf.keras.Model):
 *Note* that the network is constructed by
 [Keras Functional API](https://keras.io/guides/functional_api/).
-Arguments:
+Args:
 vocab_size: The size of the token vocabulary.
 hidden_size: The size of the transformer hidden layers.
 num_layers: The number of transformer layers.
......
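Since the class is built with the Keras Functional API, instantiation is plain. A sketch using only the documented arguments; the three-tensor input signature is an assumption borrowed from similar encoders in this repository:

```python
# Hypothetical instantiation; defaults for undocumented arguments assumed.
encoder = BigBirdEncoder(vocab_size=30522, hidden_size=768, num_layers=12)
# Input signature (ids, mask, type ids) is assumed, not shown in this diff.
outputs = encoder([word_ids, input_mask, type_ids])
```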
@@ -114,7 +114,7 @@ class RelativePositionEncoding(tf.keras.layers.Layer):
 def call(self, pos_seq, batch_size=None):
   """Implements call() for the layer.
-Arguments:
+Args:
 pos_seq: A 1-D `Tensor`
 batch_size: The optionally provided batch size that tiles the relative
 positional encoding.
......
@@ -57,7 +57,7 @@ def _filter_and_allreduce_gradients(grads_and_vars,
 The allreduced gradients are then passed to optimizer.apply_gradients(
 experimental_aggregate_gradients=False).
-Arguments:
+Args:
 grads_and_vars: gradients and variables pairs.
 allreduce_precision: Whether to allreduce gradients in float32 or float16.
 bytes_per_pack: A non-negative integer. Breaks collective operations into
@@ -101,7 +101,7 @@ def minimize_using_explicit_allreduce(tape,
 For TPU and GPU training using FP32, explicit allreduce will aggregate
 gradients in FP32 format.
-Arguments:
+Args:
 tape: An instance of `tf.GradientTape`.
 optimizer: An instance of `tf.keras.optimizers.Optimizer`.
 loss: the loss tensor.
......
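A custom-training-loop sketch for the function above; the `trainable_variables` positional argument is assumed from the surrounding code, and `loss_fn`/`model` are hypothetical:

```python
import tensorflow as tf

with tf.GradientTape() as tape:
  loss = loss_fn(labels, model(features))  # hypothetical model and loss
# Gradients are allreduced explicitly, then applied with
# experimental_aggregate_gradients=False, as the docstring describes.
minimize_using_explicit_allreduce(
    tape, optimizer, loss, model.trainable_variables)
```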
@@ -68,7 +68,7 @@ class FocalLoss(tf.keras.losses.Loss):
 name=None):
   """Initializes `FocalLoss`.
-Arguments:
+Args:
 alpha: The `alpha` weight factor for binary class imbalance.
 gamma: The `gamma` focusing parameter to re-weight loss.
 num_classes: Number of foreground classes.
@@ -91,7 +91,7 @@ class FocalLoss(tf.keras.losses.Loss):
 def call(self, y_true, y_pred):
   """Invokes the `FocalLoss`.
-Arguments:
+Args:
 y_true: Ordered Dict with level to [batch, height, width, num_anchors].
 for example,
 {3: tf.Tensor(shape=[32, 512, 512, 9], dtype=tf.float32),
@@ -143,7 +143,7 @@ class RetinanetBoxLoss(tf.keras.losses.Loss):
 name=None):
   """Initializes `RetinanetBoxLoss`.
-Arguments:
+Args:
 delta: A float, the point where the Huber loss function changes from a
 quadratic to linear.
 reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
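What `delta` controls, shown as a standalone sketch of the Huber loss (quadratic for |error| <= delta, linear beyond); not the class's actual code path:

```python
import tensorflow as tf

def huber_loss_sketch(error, delta):
  abs_error = tf.abs(error)
  quadratic = tf.minimum(abs_error, delta)  # capped at the changeover point
  linear = abs_error - quadratic            # the part beyond delta
  return 0.5 * quadratic ** 2 + delta * linear
```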
@@ -167,7 +167,7 @@ class RetinanetBoxLoss(tf.keras.losses.Loss):
 Computes total detection loss including box and class loss from all levels.
-Arguments:
+Args:
 y_true: Ordered Dict with level to [batch, height, width,
 num_anchors * 4] for example,
 {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.float32),
......
@@ -108,7 +108,7 @@ class SummaryWriter(object):
 def __init__(self, model_dir: Text, name: Text):
   """Inits SummaryWriter with paths.
-Arguments:
+Args:
 model_dir: the model folder path.
 name: the summary subfolder name.
 """
......
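A plausible reading of the wrapper documented above, assuming it delegates to `tf.summary`; the subfolder layout and method set are assumptions:

```python
import os
import tensorflow as tf

class SummaryWriterSketch:
  """Sketch only: the real class may buffer values or expose other helpers."""

  def __init__(self, model_dir, name):
    # Summaries land in <model_dir>/<name> (path layout assumed).
    self._writer = tf.summary.create_file_writer(os.path.join(model_dir, name))

  def scalar(self, tag, value, step):
    with self._writer.as_default():
      tf.summary.scalar(tag, value, step=step)
```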
@@ -42,7 +42,7 @@ class SpatialPyramidPooling(tf.keras.layers.Layer):
 **kwargs):
   """Initializes `SpatialPyramidPooling`.
-Arguments:
+Args:
 output_channels: Number of channels produced by SpatialPyramidPooling.
 dilation_rates: A list of integers for parallel dilated conv.
 pool_kernel_size: A list of integers or None. If None, global average
......
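The parallel dilated branches that `dilation_rates` implies, in ASPP style. A structural sketch only: batch normalization, activations, and the `pool_kernel_size` pooling branch are omitted:

```python
import tensorflow as tf

def spatial_pyramid_pooling_sketch(features, output_channels, dilation_rates):
  # One 3x3 conv per dilation rate, run in parallel over the same features.
  branches = [
      tf.keras.layers.Conv2D(
          output_channels, 3, padding='same', dilation_rate=rate)(features)
      for rate in dilation_rates
  ]
  # Fuse the branches with a 1x1 projection back to `output_channels`.
  return tf.keras.layers.Conv2D(output_channels, 1)(
      tf.keras.layers.Concatenate()(branches))
```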
@@ -31,7 +31,7 @@ class FocalLoss(tf.keras.losses.Loss):
 name=None):
   """Initializes `FocalLoss`.
-Arguments:
+Args:
 alpha: The `alpha` weight factor for binary class imbalance.
 gamma: The `gamma` focusing parameter to re-weight loss.
 reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
@@ -52,7 +52,7 @@ class FocalLoss(tf.keras.losses.Loss):
 def call(self, y_true, y_pred):
   """Invokes the `FocalLoss`.
-Arguments:
+Args:
 y_true: A tensor of size [batch, num_anchors, num_classes]
 y_pred: A tensor of size [batch, num_anchors, num_classes]
......
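For reference, sigmoid focal loss (Lin et al., 2017) over tensors shaped like the `y_true`/`y_pred` above can be written from scratch as follows; this assumes `y_pred` holds logits, and the repository class may differ in details:

```python
import tensorflow as tf

def focal_loss_sketch(y_true, y_pred, alpha=0.25, gamma=1.5):
  # Per-element cross entropy on logits (logits assumed).
  ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
  probs = tf.sigmoid(y_pred)
  # p_t is the probability assigned to the true class of each element.
  p_t = y_true * probs + (1.0 - y_true) * (1.0 - probs)
  alpha_t = y_true * alpha + (1.0 - y_true) * (1.0 - alpha)
  # The (1 - p_t)^gamma factor down-weights easy, well-classified examples.
  return alpha_t * tf.pow(1.0 - p_t, gamma) * ce
```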
@@ -20,7 +20,7 @@ import tensorflow as tf
 def multi_level_flatten(multi_level_inputs, last_dim=None):
   """Flattens a multi-level input.
-Arguments:
+Args:
 multi_level_inputs: Ordered Dict with level to [batch, d1, ..., dm].
 last_dim: Whether the output should be [batch_size, None], or [batch_size,
 None, last_dim]. Defaults to `None`.
......
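A guess at the behavior the docstring describes, flattening each level and concatenating; sketch only, the real function's level ordering and reshape details are assumptions:

```python
import tensorflow as tf

def multi_level_flatten_sketch(multi_level_inputs, last_dim=None):
  flattened = []
  for level in sorted(multi_level_inputs):
    tensor = multi_level_inputs[level]
    batch = tf.shape(tensor)[0]
    # [batch, -1] when last_dim is None, else [batch, -1, last_dim].
    shape = [batch, -1] if last_dim is None else [batch, -1, last_dim]
    flattened.append(tf.reshape(tensor, shape))
  return tf.concat(flattened, axis=1)
```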
@@ -48,7 +48,7 @@ class PerClassIoU(tf.keras.metrics.Metric):
 def __init__(self, num_classes, name=None, dtype=None):
   """Initializes `PerClassIoU`.
-Arguments:
+Args:
 num_classes: The possible number of labels the prediction task can have.
 This value must be provided, since a confusion matrix of dimension =
 [num_classes, num_classes] will be allocated.
......
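How per-class IoU falls out of the [num_classes, num_classes] confusion matrix mentioned above: iou[c] = TP / (ground truth + predictions - TP). A standalone sketch, not the metric's actual `result()`:

```python
import tensorflow as tf

def per_class_iou_from_confusion(cm):
  cm = tf.cast(cm, tf.float32)
  tp = tf.linalg.diag_part(cm)    # true positives per class
  gt = tf.reduce_sum(cm, axis=1)  # row sums: ground-truth counts
  pred = tf.reduce_sum(cm, axis=0)  # column sums: prediction counts
  return tf.math.divide_no_nan(tp, gt + pred - tp)
```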