ModelZoo / ResNet50_tensorflow · Commits
"git@developer.sourcefind.cn:wangsen/mineru.git" did not exist on "c2a18cbb047e9ab81a1e1d6cd49b4e2f6906cde5"
Commit 4e9f9514 authored Aug 02, 2022 by Hongkun Yu, committed by A. Unique TensorFlower on Aug 02, 2022
Parent: a81f8590

Internal change

PiperOrigin-RevId: 464934071
Showing 8 changed files with 14 additions and 14 deletions.
official/nlp/modeling/layers/transformer_scaffold.py         +1 -1
official/nlp/modeling/networks/albert_encoder.py             +1 -1
official/nlp/modeling/networks/classification.py             +1 -1
official/nlp/modeling/networks/encoder_scaffold.py           +1 -1
official/nlp/modeling/networks/mobile_bert_encoder.py        +1 -1
official/nlp/modeling/networks/packed_sequence_embedding.py  +4 -4
official/nlp/modeling/networks/span_labeling.py              +1 -1
official/nlp/modeling/networks/xlnet_base.py                 +4 -4
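All 14 changed lines in this commit follow the same pattern: two-argument calls of the form super(ClassName, self).method(...) are replaced with the zero-argument form super().method(...). The commit message only says "Internal change", but the diffs below make the intent clear. As a minimal, repo-independent sketch (the class names here are illustrative, not from the repo), the two spellings are equivalent in Python 3:

class Base:
  def build(self, input_shape):
    print("Base.build", input_shape)

class OldStyle(Base):
  def build(self, input_shape):
    # Pre-change spelling: class and instance passed explicitly.
    super(OldStyle, self).build(input_shape)

class NewStyle(Base):
  def build(self, input_shape):
    # Post-change spelling: zero-argument super(), resolved implicitly.
    super().build(input_shape)

OldStyle().build([1, 2])  # prints: Base.build [1, 2]
NewStyle().build([1, 2])  # prints: Base.build [1, 2]

The zero-argument form avoids repeating the class name and stays correct if the class is renamed; behavior is unchanged, which matches the symmetric +14/-14 line counts above.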
official/nlp/modeling/layers/transformer_scaffold.py

@@ -237,7 +237,7 @@ class TransformerScaffold(tf.keras.layers.Layer):
     self._output_layer_norm = tf.keras.layers.LayerNormalization(
         name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
 
-    super(TransformerScaffold, self).build(input_shape)
+    super().build(input_shape)
     logging.info("%s configs: %s", self.__class__.__name__, self.get_config())
 
   def get_config(self):
official/nlp/modeling/networks/albert_encoder.py

@@ -173,7 +173,7 @@ class AlbertEncoder(tf.keras.Model):
     # created using the Functional API. Once super().__init__ is called, we
     # can assign attributes to `self` - note that all `self` assignments are
     # below this line.
-    super(AlbertEncoder, self).__init__(
+    super().__init__(
         inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
 
     config_dict = {
         'vocab_size': vocab_size,
official/nlp/modeling/networks/classification.py

@@ -74,7 +74,7 @@ class Classification(tf.keras.Model):
           ('Unknown `output` value "%s". `output` can be either "logits" or '
            '"predictions"') % output)
 
-    super(Classification, self).__init__(
+    super().__init__(
         inputs=[cls_output], outputs=output_tensors, **kwargs)
 
     # b/164516224
official/nlp/modeling/networks/encoder_scaffold.py

@@ -271,7 +271,7 @@ class EncoderScaffold(tf.keras.Model):
     # created using the Functional API. Once super().__init__ is called, we
     # can assign attributes to `self` - note that all `self` assignments are
     # below this line.
-    super(EncoderScaffold, self).__init__(
+    super().__init__(
         inputs=inputs, outputs=outputs, **kwargs)
 
     self._hidden_cls = hidden_cls
official/nlp/modeling/networks/mobile_bert_encoder.py

@@ -163,7 +163,7 @@ class MobileBERTEncoder(tf.keras.Model):
         encoder_outputs=all_layer_outputs,
         attention_scores=all_attention_scores)
-    super(MobileBERTEncoder, self).__init__(
+    super().__init__(
         inputs=self.inputs, outputs=outputs, **kwargs)
 
   def get_embedding_table(self):
official/nlp/modeling/networks/packed_sequence_embedding.py

@@ -143,7 +143,7 @@ class PackedSequenceEmbedding(tf.keras.Model):
           [attention_mask, sub_seq_mask])
 
     outputs = [embeddings, attention_mask]
-    super(PackedSequenceEmbedding, self).__init__(
+    super().__init__(
         inputs=inputs, outputs=outputs, **kwargs)
     # TF does not track immutable attrs which do not contain Trackables,
     # so by creating a config namedtuple instead of a dict we avoid tracking it.
@@ -221,7 +221,7 @@ class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
     if 'dtype' not in kwargs:
       kwargs['dtype'] = 'float32'
 
-    super(PositionEmbeddingWithSubSeqMask, self).__init__(**kwargs)
+    super().__init__(**kwargs)
     if use_dynamic_slicing and max_sequence_length is None:
       raise ValueError(
           'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.'
@@ -236,7 +236,7 @@ class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
         'initializer': tf.keras.initializers.serialize(self._initializer),
         'use_dynamic_slicing': self._use_dynamic_slicing,
     }
-    base_config = super(PositionEmbeddingWithSubSeqMask, self).get_config()
+    base_config = super().get_config()
     return dict(list(base_config.items()) + list(config.items()))
 
   def build(self, input_shape):
@@ -273,7 +273,7 @@ class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
         shape=[weight_sequence_length, width],
         initializer=self._initializer)
 
-    super(PositionEmbeddingWithSubSeqMask, self).build(input_shape)
+    super().build(input_shape)
 
   def call(self, inputs, position_ids=None, sub_sequence_mask=None):
     """Implements call() for the layer.
official/nlp/modeling/networks/span_labeling.py

@@ -81,7 +81,7 @@ class SpanLabeling(tf.keras.Model):
     # created using the Functional API. Once super().__init__ is called, we
     # can assign attributes to `self` - note that all `self` assignments are
     # below this line.
-    super(SpanLabeling, self).__init__(
+    super().__init__(
         inputs=[sequence_data], outputs=output_tensors, **kwargs)
     config_dict = {
         'input_width': input_width,
official/nlp/modeling/networks/xlnet_base.py

@@ -384,7 +384,7 @@ class RelativePositionEncoding(tf.keras.layers.Layer):
   """
 
   def __init__(self, hidden_size, **kwargs):
-    super(RelativePositionEncoding, self).__init__(**kwargs)
+    super().__init__(**kwargs)
     self._hidden_size = hidden_size
     self._inv_freq = 1.0 / (10000.0**(
         tf.range(0, self._hidden_size, 2.0) / self._hidden_size))
@@ -476,7 +476,7 @@ class XLNetBase(tf.keras.layers.Layer):
                use_cls_mask=False,
                embedding_width=None,
                **kwargs):
-    super(XLNetBase, self).__init__(**kwargs)
+    super().__init__(**kwargs)
 
     self._vocab_size = vocab_size
     self._initializer = initializer
@@ -574,7 +574,7 @@ class XLNetBase(tf.keras.layers.Layer):
         "embedding_width": self._embedding_width,
     }
-    base_config = super(XLNetBase, self).get_config()
+    base_config = super().get_config()
     return dict(list(base_config.items()) + list(config.items()))
 
   def get_embedding_lookup_table(self):
@@ -601,7 +601,7 @@ class XLNetBase(tf.keras.layers.Layer):
         "target_mapping": target_mapping,
         "masked_tokens": masked_tokens
     }
-    return super(XLNetBase, self).__call__(inputs, **kwargs)
+    return super().__call__(inputs, **kwargs)
 
   def call(self, inputs):
     """Implements call() for the layer."""