ModelZoo / ResNet50_tensorflow / Commits

Commit c1f35955
Authored May 04, 2022 by Yulv-git
Parent: 96a8d744

    Fix some typos in official/.

Changes: 22. Showing 20 changed files with 24 additions and 24 deletions (+24 -24).
official/legacy/image_classification/efficientnet/tfhub_export.py  +2 -2
official/legacy/transformer/data_download.py  +1 -1
official/modeling/optimization/lr_schedule.py  +1 -1
official/nlp/data/train_sentencepiece.py  +1 -1
official/nlp/docs/train.md  +1 -1
official/nlp/modeling/layers/README.md  +1 -1
official/nlp/modeling/layers/gaussian_process.py  +2 -2
official/nlp/modeling/layers/reuse_attention.py  +1 -1
official/nlp/modeling/ops/beam_search.py  +1 -1
official/projects/edgetpu/nlp/modeling/edgetpu_layers.py  +1 -1
official/projects/edgetpu/vision/tasks/image_classification.py  +2 -2
official/projects/video_ssl/modeling/video_ssl_model.py  +1 -1
official/projects/yt8m/dataloaders/yt8m_input.py  +1 -1
official/vision/beta/projects/centernet/README.md  +1 -1
official/vision/beta/projects/simclr/README.md  +1 -1
official/vision/beta/projects/yolo/losses/yolo_loss.py  +1 -1
official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py  +1 -1
official/vision/data/create_coco_tf_record.py  +1 -1
official/vision/examples/starter/example_task.py  +2 -2
official/vision/ops/box_ops.py  +1 -1
official/legacy/image_classification/efficientnet/tfhub_export.py

@@ -43,8 +43,8 @@ def export_tfhub(model_path, hub_destination, model_name):
   image_input = tf.keras.layers.Input(
       shape=(None, None, 3), name="image_input", dtype=tf.float32)
   x = image_input * 255.0
-  ouputs = efficientnet_model.efficientnet(x, config)
-  hub_model = tf.keras.Model(image_input, ouputs)
+  outputs = efficientnet_model.efficientnet(x, config)
+  hub_model = tf.keras.Model(image_input, outputs)
   ckpt = tf.train.Checkpoint(model=hub_model)
   ckpt.restore(model_path).assert_existing_objects_matched()
   hub_model.save(
official/legacy/transformer/data_download.py

@@ -188,7 +188,7 @@ def download_and_extract(path, url, input_filename, target_filename):
     Full paths to extracted input and target files.

   Raises:
-    OSError: if the the download/extraction fails.
+    OSError: if the download/extraction fails.
   """
   # Check if extracted files already exist in path
   input_file = find_file(path, input_filename)
official/modeling/optimization/lr_schedule.py

@@ -23,7 +23,7 @@ import tensorflow as tf
 def _make_offset_wrapper(new_class_name: str, base_lr_class):
   """Generates a offset wrapper of learning rate schedule.

-  It will returns a subclass of the the `base_lr_class`, the subclass takes an
+  It will returns a subclass of the `base_lr_class`, the subclass takes an
   `offset` argument in the constructor. When the new class instance is called,
   the behavior is:
     new_class_object(step) = base_lr_class_object(step - offset)
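For context, the docstring touched here specifies the wrapper's behavior as new_class_object(step) = base_lr_class_object(step - offset). A minimal sketch of that offset idea against the public tf.keras schedule API (the class name and example values are illustrative; this is not the repository's `_make_offset_wrapper`):

import tensorflow as tf


class OffsetSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Illustrative wrapper: evaluates a base schedule at `step - offset`."""

  def __init__(self, base_schedule, offset=0):
    super().__init__()
    self._base_schedule = base_schedule
    self._offset = offset

  def __call__(self, step):
    # Mirrors the documented behavior:
    # new_class_object(step) = base_lr_class_object(step - offset)
    return self._base_schedule(step - self._offset)

  def get_config(self):
    return {"offset": self._offset}


base = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[100], values=[0.1, 0.01])
wrapped = OffsetSchedule(base, offset=50)
print(float(wrapped(120)))  # Same as base(70): 0.1, since 70 <= 100.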
official/nlp/data/train_sentencepiece.py

@@ -36,7 +36,7 @@ from sentencepiece import SentencePieceTrainer
 FLAGS = flags.FLAGS

 flags.DEFINE_string("output_model_path", None,
-                    "Path to save the the sentencepiece model.")
+                    "Path to save the sentencepiece model.")
 flags.mark_flag_as_required("output_model_path")

 flags.DEFINE_string("tfds_dir", None, "Directory of the tfds.")
official/nlp/docs/train.md

@@ -105,7 +105,7 @@ pip3 install --user -r official/requirements.txt
 <details>

-This example fine-tunes BERT-base from TF-Hub on the the Multi-Genre Natural
+This example fine-tunes BERT-base from TF-Hub on the Multi-Genre Natural
 Language Inference (MultiNLI) corpus using TPUs.

 Firstly, you can prepare the fine-tuning data using
official/nlp/modeling/layers/README.md

@@ -13,7 +13,7 @@ assemble new `tf.keras` layers or models.
   ["Big Bird: Transformers for Longer Sequences"](https://arxiv.org/abs/2007.14062).

 *  [CachedAttention](attention.py) implements an attention layer with cache
-   used for auto-agressive decoding.
+   used for auto-aggressive decoding.

 *  [KernelAttention](kernel_attention.py) implements a group of attention
    mechansim that express the self-attention as a linear dot-product of
official/nlp/modeling/layers/gaussian_process.py

@@ -226,7 +226,7 @@ class RandomFeatureGaussianProcess(tf.keras.layers.Layer):
     """Resets covariance matrix of the GP layer.

     This function is useful for reseting the model's covariance matrix at the
-    begining of a new epoch.
+    beginning of a new epoch.
     """
     self._gp_cov_layer.reset_precision_matrix()

@@ -380,7 +380,7 @@ class LaplaceRandomFeatureCovariance(tf.keras.layers.Layer):
     """Resets precision matrix to its initial value.

     This function is useful for reseting the model's covariance matrix at the
-    begining of a new epoch.
+    beginning of a new epoch.
     """
     precision_matrix_reset_op = self.precision_matrix.assign(
         self.initial_precision_matrix)
official/nlp/modeling/layers/reuse_attention.py

@@ -465,7 +465,7 @@ class ReuseMultiHeadAttention(tf.keras.layers.Layer):
     """Builds multi-head dot-product attention computations.

     This function builds attributes necessary for `_compute_attention` to
-    costomize attention computation to replace the default dot-product
+    customize attention computation to replace the default dot-product
     attention.

     Args:
official/nlp/modeling/ops/beam_search.py

@@ -204,7 +204,7 @@ class SequenceBeamSearch(tf.Module):
       candidate_log_probs = _log_prob_from_logits(logits)

       # Calculate new log probabilities if each of the alive sequences were
-      # extended # by the the candidate IDs.
+      # extended # by the candidate IDs.
       # Shape [batch_size, beam_size, vocab_size]
       log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
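The comment corrected here describes extending every alive beam by every candidate ID in a single broadcast add. A toy shape check of that step (random values; only the shapes mirror the quoted code, this is not the SequenceBeamSearch internals):

import tensorflow as tf

batch_size, beam_size, vocab_size = 2, 3, 5

# Per-candidate log probabilities for each alive beam: [batch, beam, vocab].
candidate_log_probs = tf.math.log_softmax(
    tf.random.normal([batch_size, beam_size, vocab_size]), axis=-1)

# Running log probability of each alive sequence: [batch, beam].
alive_log_probs = tf.math.log(tf.fill([batch_size, beam_size], 1.0 / beam_size))

# Broadcasting over the vocab axis scores every one-token extension:
# shape [batch_size, beam_size, vocab_size], as the fixed comment says.
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
print(log_probs.shape)  # (2, 3, 5)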
official/projects/edgetpu/nlp/modeling/edgetpu_layers.py

@@ -123,7 +123,7 @@ class EdgeTPUMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
     """Builds multi-head dot-product attention computations.

     This function builds attributes necessary for `_compute_attention` to
-    costomize attention computation to replace the default dot-product
+    customize attention computation to replace the default dot-product
     attention.

     Args:
official/projects/edgetpu/vision/tasks/image_classification.py

@@ -265,7 +265,7 @@ class EdgeTPUTask(base_task.Task):
     """Does forward and backward.

     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       optimizer: The optimizer for this training step.
       metrics: A nested structure of metrics objects.

@@ -319,7 +319,7 @@ class EdgeTPUTask(base_task.Task):
     """Runs validatation step.

     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       metrics: A nested structure of metrics objects.
official/projects/video_ssl/modeling/video_ssl_model.py

@@ -53,7 +53,7 @@ class VideoSSLModel(tf.keras.Model):
       hidden_dim: `int` number of hidden units in MLP.
       hidden_layer_num: `int` number of hidden layers in MLP.
       hidden_norm_args: `dict` for batchnorm arguments in MLP.
-      projection_dim: `int` number of ouput dimension for MLP.
+      projection_dim: `int` number of output dimension for MLP.
       input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
       dropout_rate: `float` rate for dropout regularization.
       aggregate_endpoints: `bool` aggregate all end ponits or only use the
official/projects/yt8m/dataloaders/yt8m_input.py

@@ -33,7 +33,7 @@ from official.vision.dataloaders import parser
 def resize_axis(tensor, axis, new_size, fill_value=0):
-  """Truncates or pads a tensor to new_size on on a given axis.
+  """Truncates or pads a tensor to new_size on a given axis.

   Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
   size increases, the padding will be performed at the end, using fill_value.
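The docstring fixed here states the contract: truncate or extend so that tensor.shape[axis] == new_size, padding at the end with fill_value. A simplified, static-shape sketch of that behavior (illustrative helper name; not the repository's resize_axis, which also handles dynamic shapes):

import tensorflow as tf


def truncate_or_pad(tensor, axis, new_size, fill_value=0):
  """Illustrative: force tensor.shape[axis] == new_size, padding at the end."""
  current = tensor.shape[axis]
  if current >= new_size:
    # Truncate: keep the first `new_size` entries along `axis`.
    begin = [0] * tensor.shape.rank
    size = list(tensor.shape)
    size[axis] = new_size
    return tf.slice(tensor, begin, size)
  # Pad at the end with `fill_value` to reach `new_size`.
  paddings = [[0, 0]] * tensor.shape.rank
  paddings[axis] = [0, new_size - current]
  return tf.pad(tensor, paddings, constant_values=fill_value)


x = tf.reshape(tf.range(6), [2, 3])
print(truncate_or_pad(x, axis=1, new_size=5).shape)  # (2, 5), padded with 0
print(truncate_or_pad(x, axis=1, new_size=2).shape)  # (2, 2), truncated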
official/vision/beta/projects/centernet/README.md

@@ -22,7 +22,7 @@ heatmaps (one heatmap for each class) is needed to predict the object. CenterNet
 proves that this can be done without a significant difference in accuracy.

-## Enviroment setup
+## Environment setup

 The code can be run on multiple GPUs or TPUs with different distribution
 strategies. See the TensorFlow distributed training
official/vision/beta/projects/simclr/README.md

@@ -10,7 +10,7 @@
 An illustration of SimCLR (from
 <a href="https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html">our blog here</a>).
 </div>

-## Enviroment setup
+## Environment setup

 The code can be run on multiple GPUs or TPUs with different distribution
 strategies. See the TensorFlow distributed training
official/vision/beta/projects/yolo/losses/yolo_loss.py

@@ -323,7 +323,7 @@ class DarknetLoss(YoloLossBase):
     grid_points = tf.stop_gradient(grid_points)
     anchor_grid = tf.stop_gradient(anchor_grid)

-    # Split all the ground truths to use as seperate items in loss computation.
+    # Split all the ground truths to use as separate items in loss computation.
     (true_box, ind_mask, true_class) = tf.split(y_true, [4, 1, 1], axis=-1)
     true_conf = tf.squeeze(true_conf, axis=-1)
     true_class = tf.squeeze(true_class, axis=-1)
official/vision/beta/projects/yolo/modeling/layers/nn_blocks.py

@@ -823,7 +823,7 @@ class CSPStack(tf.keras.layers.Layer):
   make it a cross stage partial. Added for ease of use. you should be able
   to wrap any layer stack with a CSP independent of wether it belongs
   to the Darknet family. if filter_scale = 2, then the blocks in the stack
-  passed into the the CSP stack should also have filters = filters/filter_scale
+  passed into the CSP stack should also have filters = filters/filter_scale

   Cross Stage Partial networks (CSPNets) were proposed in:
   [1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
official/vision/data/create_coco_tf_record.py

@@ -112,7 +112,7 @@ def generate_coco_panoptics_masks(segments_info, mask_path,
       represent "stuff" and "things" classes respectively.

   Returns:
-    A dict with with keys: [u'semantic_segmentation_mask', u'category_mask',
+    A dict with keys: [u'semantic_segmentation_mask', u'category_mask',
     u'instance_mask']. The dict contains 'category_mask' and 'instance_mask'
     only if `include_panoptic_eval_masks` is set to True.
   """
official/vision/examples/starter/example_task.py

@@ -138,7 +138,7 @@ class ExampleTask(base_task.Task):
       between output from Parser and input used here.

     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       optimizer: The optimizer for this training step.
       metrics: A nested structure of metrics objects.

@@ -186,7 +186,7 @@ class ExampleTask(base_task.Task):
     """Runs validatation step.

     Args:
-      inputs: A tuple of of input tensors of (features, labels).
+      inputs: A tuple of input tensors of (features, labels).
       model: A tf.keras.Model instance.
       metrics: A nested structure of metrics objects.
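Both corrected docstrings describe a step that receives a (features, labels) tuple, a tf.keras.Model, and, for training, an optimizer. A generic, self-contained sketch of such a forward-and-backward step (toy model and data; not the actual ExampleTask or EdgeTPUTask code):

import tensorflow as tf

# Toy stand-ins for the objects the task receives.
model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)


def train_step(inputs, model, optimizer):
  """Does forward and backward on one (features, labels) tuple."""
  features, labels = inputs
  with tf.GradientTape() as tape:
    logits = model(features, training=True)
    loss = loss_fn(labels, logits)
  grads = tape.gradient(loss, model.trainable_variables)
  optimizer.apply_gradients(zip(grads, model.trainable_variables))
  return loss


features = tf.random.normal([8, 4])
labels = tf.random.uniform([8], maxval=2, dtype=tf.int32)
print(float(train_step((features, labels), model, optimizer)))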
official/vision/ops/box_ops.py

@@ -616,7 +616,7 @@ def bbox_overlap(boxes, gt_boxes):
       tf.transpose(gt_invalid_mask, [0, 2, 1]))
   iou = tf.where(padding_mask, -tf.ones_like(iou), iou)

-  # Fills -1 for for invalid (-1) boxes.
+  # Fills -1 for invalid (-1) boxes.
   boxes_invalid_mask = tf.less(
       tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)
   iou = tf.where(boxes_invalid_mask, -tf.ones_like(iou), iou)
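The corrected comment refers to padded boxes encoded with -1 coordinates, whose IoU entries are overwritten with -1. A toy illustration of that masking pattern using the same tf.less / tf.where combination (made-up IoU values; not the full bbox_overlap routine):

import tensorflow as tf

# One image, two boxes; the second box is a -1 padding entry.
boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0],
                      [-1.0, -1.0, -1.0, -1.0]]])   # [batch, num_boxes, 4]
iou = tf.constant([[[0.6, 0.2],
                    [0.3, 0.9]]])                   # [batch, num_boxes, num_gt]

# A box is invalid when all its coordinates are negative, i.e. its max is < 0.
boxes_invalid_mask = tf.less(
    tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)   # [batch, num_boxes, 1]

# Fill -1 for invalid (-1) boxes, as the fixed comment describes.
iou = tf.where(boxes_invalid_mask, -tf.ones_like(iou), iou)
print(iou.numpy())  # second row becomes [-1., -1.]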