"...text-generation-inference.git" did not exist on "954653466d24a9b3435988136983398bdf788a2f"
Commit c1f35955 authored by Yulv-git's avatar Yulv-git
Browse files

Fix some typos in official/.

parent 96a8d744
@@ -43,8 +43,8 @@ def export_tfhub(model_path, hub_destination, model_name):
   image_input = tf.keras.layers.Input(
       shape=(None, None, 3), name="image_input", dtype=tf.float32)
   x = image_input * 255.0
-  ouputs = efficientnet_model.efficientnet(x, config)
-  hub_model = tf.keras.Model(image_input, ouputs)
+  outputs = efficientnet_model.efficientnet(x, config)
+  hub_model = tf.keras.Model(image_input, outputs)
   ckpt = tf.train.Checkpoint(model=hub_model)
   ckpt.restore(model_path).assert_existing_objects_matched()
   hub_model.save(
...
@@ -188,7 +188,7 @@ def download_and_extract(path, url, input_filename, target_filename):
     Full paths to extracted input and target files.
   Raises:
-    OSError: if the the download/extraction fails.
+    OSError: if the download/extraction fails.
   """
   # Check if extracted files already exist in path
   input_file = find_file(path, input_filename)
...
@@ -23,7 +23,7 @@ import tensorflow as tf
 def _make_offset_wrapper(new_class_name: str, base_lr_class):
   """Generates a offset wrapper of learning rate schedule.
-  It will returns a subclass of the the `base_lr_class`, the subclass takes an
+  It will returns a subclass of the `base_lr_class`, the subclass takes an
   `offset` argument in the constructor. When the new class instance is called,
   the behavior is:
     new_class_object(step) = base_lr_class_object(step - offset)
...
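For readers unfamiliar with this pattern, here is a minimal sketch of the behavior the docstring above describes: dynamically generating a schedule subclass whose `__call__` shifts the step by a constant offset. The name `make_offset_wrapper_sketch` and the use of `CosineDecay` are illustrative assumptions, not the repository's actual `_make_offset_wrapper` implementation (which may, for example, also clamp negative steps).

```python
# A minimal sketch (not the actual implementation) of the offset-wrapper
# behavior described above: the wrapped schedule simply shifts the step.
import tensorflow as tf


def make_offset_wrapper_sketch(new_class_name, base_lr_class):
  """Returns a subclass of `base_lr_class` that offsets the step argument."""

  def __init__(self, offset=0, **kwargs):
    base_lr_class.__init__(self, **kwargs)
    self._offset = offset

  def __call__(self, step):
    # Mirrors the documented behavior:
    #   new_class_object(step) = base_lr_class_object(step - offset)
    return base_lr_class.__call__(self, step - self._offset)

  return type(new_class_name, (base_lr_class,),
              {'__init__': __init__, '__call__': __call__})


# Example: a cosine schedule that only starts decaying after step 1000.
OffsetCosine = make_offset_wrapper_sketch(
    'OffsetCosineDecay', tf.keras.optimizers.schedules.CosineDecay)
schedule = OffsetCosine(offset=1000, initial_learning_rate=0.1,
                        decay_steps=10000)
```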
@@ -36,7 +36,7 @@ from sentencepiece import SentencePieceTrainer
 FLAGS = flags.FLAGS
 flags.DEFINE_string("output_model_path", None,
-                    "Path to save the the sentencepiece model.")
+                    "Path to save the sentencepiece model.")
 flags.mark_flag_as_required("output_model_path")
 flags.DEFINE_string("tfds_dir", None, "Directory of the tfds.")
...
@@ -105,7 +105,7 @@ pip3 install --user -r official/requirements.txt
 <details>
-This example fine-tunes BERT-base from TF-Hub on the the Multi-Genre Natural
+This example fine-tunes BERT-base from TF-Hub on the Multi-Genre Natural
 Language Inference (MultiNLI) corpus using TPUs.
 Firstly, you can prepare the fine-tuning data using
...
@@ -13,7 +13,7 @@ assemble new `tf.keras` layers or models.
   ["Big Bird: Transformers for Longer Sequences"](https://arxiv.org/abs/2007.14062).
 * [CachedAttention](attention.py) implements an attention layer with cache
-  used for auto-agressive decoding.
+  used for auto-aggressive decoding.
 * [KernelAttention](kernel_attention.py) implements a group of attention
   mechansim that express the self-attention as a linear dot-product of
...
@@ -226,7 +226,7 @@ class RandomFeatureGaussianProcess(tf.keras.layers.Layer):
     """Resets covariance matrix of the GP layer.
     This function is useful for reseting the model's covariance matrix at the
-    begining of a new epoch.
+    beginning of a new epoch.
     """
     self._gp_cov_layer.reset_precision_matrix()
@@ -380,7 +380,7 @@ class LaplaceRandomFeatureCovariance(tf.keras.layers.Layer):
     """Resets precision matrix to its initial value.
     This function is useful for reseting the model's covariance matrix at the
-    begining of a new epoch.
+    beginning of a new epoch.
     """
     precision_matrix_reset_op = self.precision_matrix.assign(
         self.initial_precision_matrix)
...
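As context for the two docstrings above: the Laplace covariance estimate is accumulated over one pass through the data, so the reset is typically issued at each epoch boundary. A hedged sketch of that usage, where `gp_layer`, `num_epochs`, `train_dataset`, and `train_step` are stand-in names rather than symbols from this repository:

```python
# Hypothetical training loop illustrating the epoch-boundary reset described
# above; `gp_layer` stands in for a RandomFeatureGaussianProcess instance.
for epoch in range(num_epochs):
  # Discard the covariance state accumulated during the previous epoch so
  # the estimate reflects exactly one pass through the data.
  gp_layer.reset_covariance_matrix()
  for features, labels in train_dataset:
    train_step(features, labels)
```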
@@ -465,7 +465,7 @@ class ReuseMultiHeadAttention(tf.keras.layers.Layer):
     """Builds multi-head dot-product attention computations.
     This function builds attributes necessary for `_compute_attention` to
-    costomize attention computation to replace the default dot-product
+    customize attention computation to replace the default dot-product
     attention.
     Args:
...
@@ -204,7 +204,7 @@ class SequenceBeamSearch(tf.Module):
       candidate_log_probs = _log_prob_from_logits(logits)
       # Calculate new log probabilities if each of the alive sequences were
-      # extended # by the the candidate IDs.
+      # extended # by the candidate IDs.
       # Shape [batch_size, beam_size, vocab_size]
       log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
...
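The broadcast in the last line above is the core of beam-search scoring: each alive sequence's running log probability (shape `[batch, beam]`) is added across the vocabulary axis of the per-candidate log probabilities. A self-contained sketch, assuming `_log_prob_from_logits` is the standard log-softmax (the shapes here are arbitrary illustration values):

```python
# Sketch of the log-probability bookkeeping above, under the assumption that
# _log_prob_from_logits is a log-softmax over the vocabulary axis.
import tensorflow as tf


def _log_prob_from_logits(logits):
  # log softmax: logits - logsumexp(logits) along the last (vocab) axis.
  return logits - tf.reduce_logsumexp(logits, axis=-1, keepdims=True)


batch_size, beam_size, vocab_size = 2, 3, 5
logits = tf.random.normal([batch_size, beam_size, vocab_size])
alive_log_probs = tf.zeros([batch_size, beam_size])

candidate_log_probs = _log_prob_from_logits(logits)
# Shape [batch_size, beam_size, vocab_size]: the log prob of each alive
# sequence extended by each candidate ID.
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
```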
@@ -123,7 +123,7 @@ class EdgeTPUMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
     """Builds multi-head dot-product attention computations.
     This function builds attributes necessary for `_compute_attention` to
-    costomize attention computation to replace the default dot-product
+    customize attention computation to replace the default dot-product
     attention.
     Args:
...
@@ -265,7 +265,7 @@ class EdgeTPUTask(base_task.Task):
     """Does forward and backward.
     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       optimizer: The optimizer for this training step.
       metrics: A nested structure of metrics objects.
@@ -319,7 +319,7 @@ class EdgeTPUTask(base_task.Task):
     """Runs validatation step.
     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       metrics: A nested structure of metrics objects.
...
@@ -53,7 +53,7 @@ class VideoSSLModel(tf.keras.Model):
       hidden_dim: `int` number of hidden units in MLP.
       hidden_layer_num: `int` number of hidden layers in MLP.
       hidden_norm_args: `dict` for batchnorm arguments in MLP.
-      projection_dim: `int` number of ouput dimension for MLP.
+      projection_dim: `int` number of output dimension for MLP.
       input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
       dropout_rate: `float` rate for dropout regularization.
       aggregate_endpoints: `bool` aggregate all end ponits or only use the
...
@@ -33,7 +33,7 @@ from official.vision.dataloaders import parser
 def resize_axis(tensor, axis, new_size, fill_value=0):
-  """Truncates or pads a tensor to new_size on on a given axis.
+  """Truncates or pads a tensor to new_size on a given axis.
   Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
   size increases, the padding will be performed at the end, using fill_value.
...
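The docstring above fully specifies the contract, so a small sketch makes it concrete. This is not the repository's `resize_axis` (which also handles dynamic shapes); it assumes a statically known size along `axis`:

```python
# A minimal sketch of the truncate-or-pad behavior documented above,
# assuming a static shape along `axis` for brevity.
import tensorflow as tf


def resize_axis_sketch(tensor, axis, new_size, fill_value=0):
  size = tensor.shape[axis]
  if size >= new_size:
    # Truncate: keep the first new_size entries along `axis`.
    return tf.slice(tensor,
                    begin=[0] * tensor.shape.rank,
                    size=[new_size if d == axis else -1
                          for d in range(tensor.shape.rank)])
  # Extend: pad at the end along `axis` with fill_value.
  paddings = [[0, new_size - size] if d == axis else [0, 0]
              for d in range(tensor.shape.rank)]
  return tf.pad(tensor, paddings, constant_values=fill_value)


x = tf.ones([2, 5])
print(resize_axis_sketch(x, axis=1, new_size=3).shape)  # (2, 3)
print(resize_axis_sketch(x, axis=1, new_size=8).shape)  # (2, 8)
```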
@@ -22,7 +22,7 @@ heatmaps (one heatmap for each class) is needed to predict the object. CenterNet
 proves that this can be done without a significant difference in accuracy.
-## Enviroment setup
+## Environment setup
 The code can be run on multiple GPUs or TPUs with different distribution
 strategies. See the TensorFlow distributed training
...
@@ -10,7 +10,7 @@
 An illustration of SimCLR (from <a href="https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html">our blog here</a>).
 </div>
-## Enviroment setup
+## Environment setup
 The code can be run on multiple GPUs or TPUs with different distribution
 strategies. See the TensorFlow distributed training
...
@@ -323,7 +323,7 @@ class DarknetLoss(YoloLossBase):
       grid_points = tf.stop_gradient(grid_points)
       anchor_grid = tf.stop_gradient(anchor_grid)
-      # Split all the ground truths to use as seperate items in loss computation.
+      # Split all the ground truths to use as separate items in loss computation.
       (true_box, ind_mask, true_class) = tf.split(y_true, [4, 1, 1], axis=-1)
       true_conf = tf.squeeze(true_conf, axis=-1)
       true_class = tf.squeeze(true_class, axis=-1)
...
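To make the `tf.split` in the hunk above concrete: `y_true` packs the box coordinates (4), an index mask (1), and the class id (1) along the last axis, and the split separates them for the individual loss terms. The tensor shapes below are hypothetical, chosen only for illustration:

```python
# Illustration of the split above; [batch, boxes, 4 + 1 + 1] is an assumed
# layout for this sketch, not taken from the repository.
import tensorflow as tf

y_true = tf.random.normal([8, 100, 6])
true_box, ind_mask, true_class = tf.split(y_true, [4, 1, 1], axis=-1)
print(true_box.shape, ind_mask.shape, true_class.shape)
# (8, 100, 4) (8, 100, 1) (8, 100, 1)
true_class = tf.squeeze(true_class, axis=-1)  # (8, 100)
```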
@@ -823,7 +823,7 @@ class CSPStack(tf.keras.layers.Layer):
   make it a cross stage partial. Added for ease of use. you should be able
   to wrap any layer stack with a CSP independent of wether it belongs
   to the Darknet family. if filter_scale = 2, then the blocks in the stack
-  passed into the the CSP stack should also have filters = filters/filter_scale
+  passed into the CSP stack should also have filters = filters/filter_scale
   Cross Stage Partial networks (CSPNets) were proposed in:
   [1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
...
@@ -112,7 +112,7 @@ def generate_coco_panoptics_masks(segments_info, mask_path,
     represent "stuff" and "things" classes respectively.
   Returns:
-    A dict with with keys: [u'semantic_segmentation_mask', u'category_mask',
+    A dict with keys: [u'semantic_segmentation_mask', u'category_mask',
     u'instance_mask']. The dict contains 'category_mask' and 'instance_mask'
     only if `include_panoptic_eval_masks` is set to True.
   """
...
@@ -138,7 +138,7 @@ class ExampleTask(base_task.Task):
     between output from Parser and input used here.
     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       optimizer: The optimizer for this training step.
       metrics: A nested structure of metrics objects.
@@ -186,7 +186,7 @@ class ExampleTask(base_task.Task):
     """Runs validatation step.
     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       metrics: A nested structure of metrics objects.
...
@@ -616,7 +616,7 @@ def bbox_overlap(boxes, gt_boxes):
       tf.transpose(gt_invalid_mask, [0, 2, 1]))
   iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
-  # Fills -1 for for invalid (-1) boxes.
+  # Fills -1 for invalid (-1) boxes.
   boxes_invalid_mask = tf.less(
       tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)
   iou = tf.where(boxes_invalid_mask, -tf.ones_like(iou), iou)
...
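The masking in the hunk above relies on padded boxes being encoded as all `-1` rows: any box whose maximum coordinate is negative must be padding, so its IoU row is forced to `-1`. A short sketch under that assumption (the IoU values and box coordinates here are invented for illustration):

```python
# Sketch of the invalid-box masking above: boxes padded with -1 get their
# IoU entries forced to -1 so downstream matching ignores them.
import tensorflow as tf

iou = tf.random.uniform([1, 3, 4])             # [batch, num_boxes, num_gt]
boxes = tf.constant([[[0., 0., 1., 1.],
                      [0., 0., 2., 2.],
                      [-1., -1., -1., -1.]]])  # last box is padding

boxes_invalid_mask = tf.less(
    tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)  # [1, 3, 1]
iou = tf.where(boxes_invalid_mask, -tf.ones_like(iou), iou)
# Row 2 of `iou` is now all -1, marking the padded box as invalid.
```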