"vscode:/vscode.git/clone" did not exist on "ba8ad4f5995c1fdc7a993bfbf8914ebd04f36944"
Commit abd510a6 authored by Hongkun Yu, committed by A. Unique TensorFlower

Adds deprecation warnings for old BERT code and the deprecated pattern: pack_inputs/unpack_inputs

PiperOrigin-RevId: 290111892
parent f5e6e291
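Before the diff itself, it may help to see how the decorator this commit applies everywhere actually behaves. A minimal sketch, assuming a hypothetical toy function (`my_add` is not part of this commit): `deprecation.deprecated(date, instructions)` wraps a callable so that calling it logs the instruction string as a warning, and passing `None` for `date` marks the symbol deprecated as of now.

```python
# Minimal sketch, not part of this commit: behavior of the decorator used below.
import tensorflow as tf
from tensorflow.python.util import deprecation


# `None` means "deprecated as of now"; a "YYYY-MM-DD" string names a date instead.
@deprecation.deprecated(None, "Use `tf.add` instead.")
def my_add(a, b):  # hypothetical toy function for illustration
  return a + b


# The first call logs a deprecation warning containing the instructions above;
# by default (warn_once=True) later calls stay silent.
print(my_add(tf.constant(1), tf.constant(2)))
```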
@@ -21,9 +21,16 @@ from __future__ import print_function
 
 import six
 import tensorflow as tf
+from tensorflow.python.util import deprecation
 
 from official.modeling import activations
 
 
+@deprecation.deprecated(
+    None,
+    "tf.keras.layers.Layer supports multiple positional args and kwargs as "
+    "input tensors. pack/unpack inputs to override __call__ is no longer "
+    "needed."
+)
 def pack_inputs(inputs):
   """Pack a list of `inputs` tensors to a tuple.
@@ -44,6 +51,12 @@ def pack_inputs(inputs):
   return tuple(outputs)
 
 
+@deprecation.deprecated(
+    None,
+    "tf.keras.layers.Layer supports multiple positional args and kwargs as "
+    "input tensors. pack/unpack inputs to override __call__ is no longer "
+    "needed."
+)
 def unpack_inputs(inputs):
   """unpack a tuple of `inputs` tensors to a tuple.
...
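The warning text in both decorators above points at the replacement pattern: `tf.keras.layers.Layer.call` accepts several positional tensors and keyword arguments directly, so there is no need to pack them into a tuple (with placeholders standing in for `None`) just to override `__call__`. A minimal sketch of that pattern, using a hypothetical layer invented for illustration:

```python
# Minimal sketch, assuming a hypothetical two-input layer; not from this repo.
import tensorflow as tf


class AddSegmentBias(tf.keras.layers.Layer):
  """Toy layer whose `call` takes two tensors directly, no pack/unpack."""

  def call(self, word_embeddings, token_type_ids=None):
    if token_type_ids is None:  # optional inputs are ordinary default args
      return word_embeddings
    return word_embeddings + tf.cast(token_type_ids[..., None],
                                     word_embeddings.dtype)


layer = AddSegmentBias()
out = layer(tf.ones([2, 4, 8]), tf.zeros([2, 4], dtype=tf.int32))
```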
@@ -24,6 +24,7 @@ import math
 
 import six
 import tensorflow as tf
+from tensorflow.python.util import deprecation
 
 from official.modeling import tf_utils
@@ -145,6 +146,7 @@ class AlbertConfig(BertConfig):
     return config
 
 
+@deprecation.deprecated(None, "The function should not be used any more.")
 def get_bert_model(input_word_ids,
                    input_mask,
                    input_type_ids,
@@ -183,6 +185,8 @@ class BertModel(tf.keras.layers.Layer):
   ```
   """
 
+  @deprecation.deprecated(
+      None, "Please use `nlp.modeling.networks.TransformerEncoder` instead.")
   def __init__(self, config, float_type=tf.float32, **kwargs):
     super(BertModel, self).__init__(**kwargs)
     self.config = (
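The warning names `nlp.modeling.networks.TransformerEncoder` as the replacement for this Keras-layer BertModel. A hedged migration sketch follows; the import path matches the warning text, but the constructor arguments shown are assumptions that should be checked against the installed Model Garden release:

```python
# Hedged sketch: argument names below are assumptions, verify them against
# your Model Garden version before relying on this.
from official.nlp.modeling import networks

encoder = networks.TransformerEncoder(
    vocab_size=30522,         # BERT-base uncased English vocabulary size
    hidden_size=768,
    num_layers=12,
    num_attention_heads=12)   # stands in for the deprecated BertModel layer
```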
@@ -240,6 +244,7 @@ class BertModel(tf.keras.layers.Layer):
     Args:
       inputs: packed input tensors.
       mode: string, `bert` or `encoder`.
+
     Returns:
       Output tensor of the last layer for BERT training (mode=`bert`) which
       is a float Tensor of shape [batch_size, seq_length, hidden_size] or
@@ -358,8 +363,8 @@ class EmbeddingPostprocessor(tf.keras.layers.Layer):
     self.output_layer_norm = tf.keras.layers.LayerNormalization(
         name="layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
-    self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_prob,
-                                                  dtype=tf.float32)
+    self.output_dropout = tf.keras.layers.Dropout(
+        rate=self.dropout_prob, dtype=tf.float32)
     super(EmbeddingPostprocessor, self).build(input_shapes)
 
   def __call__(self, word_embeddings, token_type_ids=None, **kwargs):
@@ -546,8 +551,8 @@ class Dense3D(tf.keras.layers.Layer):
     use_bias: A bool, whether the layer uses a bias.
     output_projection: A bool, whether the Dense3D layer is used for output
       linear projection.
-    backward_compatible: A bool, whether the variables shape are compatible
-      with checkpoints converted from TF 1.x.
+    backward_compatible: A bool, whether the variables shape are compatible with
+      checkpoints converted from TF 1.x.
   """
 
   def __init__(self,
@@ -647,7 +652,8 @@ class Dense3D(tf.keras.layers.Layer):
     """
     if self.backward_compatible:
       kernel = tf.keras.backend.reshape(self.kernel, self.kernel_shape)
-      bias = (tf.keras.backend.reshape(self.bias, self.bias_shape)
-              if self.use_bias else None)
+      bias = (
+          tf.keras.backend.reshape(self.bias, self.bias_shape)
+          if self.use_bias else None)
     else:
       kernel = self.kernel
@@ -784,7 +790,9 @@ class TransformerBlock(tf.keras.layers.Layer):
         rate=self.hidden_dropout_prob)
     self.attention_layer_norm = (
         tf.keras.layers.LayerNormalization(
-            name="self_attention_layer_norm", axis=-1, epsilon=1e-12,
+            name="self_attention_layer_norm",
+            axis=-1,
+            epsilon=1e-12,
             # We do layer norm in float32 for numeric stability.
             dtype=tf.float32))
     self.intermediate_dense = Dense2DProjection(
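The comment preserved in this hunk ("We do layer norm in float32 for numeric stability") is worth unpacking: under float16 or mixed-precision compute, the mean and variance reductions inside layer norm can lose precision, so the layer is pinned to `dtype=tf.float32` and fed float32 values. A standalone sketch of the idea, with tensors invented for illustration:

```python
# Standalone sketch, not from this repo: keep layer-norm math in float32.
import tensorflow as tf

x16 = tf.random.normal([2, 4, 8], dtype=tf.float16)   # float16 activations
layer_norm = tf.keras.layers.LayerNormalization(
    axis=-1, epsilon=1e-12, dtype=tf.float32)         # pinned to float32

y32 = layer_norm(tf.cast(x16, tf.float32))  # statistics computed in float32
y16 = tf.cast(y32, tf.float16)              # cast back for the float16 graph
```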
@@ -909,6 +917,7 @@ class Transformer(tf.keras.layers.Layer):
       inputs: packed inputs.
       return_all_layers: bool, whether to return outputs of all layers inside
         encoders.
+
     Returns:
       Output tensor of the last layer or a list of output tensors.
     """
...