Commit fbfa69c8 authored by Zhenyu Tan, committed by A. Unique TensorFlower

Replace keras_nlp.module with keras_nlp.layers.module

PiperOrigin-RevId: 331620370
parent 4445edb3
...
@@ -14,4 +14,4 @@
 # ==============================================================================
 """Keras-NLP package definition."""
 # pylint: disable=wildcard-import
-from official.nlp.keras_nlp.layers import *
+from official.nlp.keras_nlp import layers
...
@@ -25,7 +25,7 @@ from official.nlp.modeling.layers.util import tf_function_if_eager
 @tf.keras.utils.register_keras_serializable(package="Text")
-class Transformer(keras_nlp.TransformerEncoderBlock):
+class Transformer(keras_nlp.layers.TransformerEncoderBlock):
   """Transformer layer.
   This layer implements the Transformer from "Attention Is All You Need".
...
@@ -114,7 +114,7 @@ class AlbertTransformerEncoder(tf.keras.Model):
     word_embeddings = self._embedding_layer(word_ids)
     # Always uses dynamic slicing for simplicity.
-    self._position_embedding_layer = keras_nlp.PositionEmbedding(
+    self._position_embedding_layer = keras_nlp.layers.PositionEmbedding(
         initializer=initializer,
         max_length=max_sequence_length,
         name='position_embedding')
...
@@ -150,7 +150,7 @@ class AlbertTransformerEncoder(tf.keras.Model):
     data = embeddings
     attention_mask = layers.SelfAttentionMask()([data, mask])
-    shared_layer = keras_nlp.TransformerEncoderBlock(
+    shared_layer = keras_nlp.layers.TransformerEncoderBlock(
         num_attention_heads=num_attention_heads,
         inner_dim=intermediate_size,
         inner_activation=activation,
...
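For readers tracking call sites, here is a minimal sketch of how code refers to these layers after this commit. The keras_nlp.layers namespace, the layer class names, and the keyword-argument names are taken from the diff above; the concrete values (initializer string, max_length, head count, inner_dim, activation) are illustrative assumptions, not part of this change.

# Minimal sketch (not from this commit): layer classes and kwarg names come
# from the diff above; all concrete values below are assumed for illustration.
from official.nlp import keras_nlp

# Layers are now addressed through the explicit `layers` namespace rather than
# the package root, e.g. keras_nlp.PositionEmbedding becomes
# keras_nlp.layers.PositionEmbedding.
position_embedding = keras_nlp.layers.PositionEmbedding(
    initializer='glorot_uniform',   # assumed value
    max_length=512,                 # assumed value
    name='position_embedding')

encoder_block = keras_nlp.layers.TransformerEncoderBlock(
    num_attention_heads=12,         # assumed value
    inner_dim=3072,                 # assumed value
    inner_activation='gelu')        # assumed value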