Unverified Commit c1c17bd0 authored by Joydeep Bhattacharjee, committed by GitHub

update transformer XL with tf decorator (#16166)

* update transformer XL with tf decorator

* code fixup

* remove unused variables
parent 611d3a09
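
Every hunk below applies the same mechanical change to TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLLMHeadModel and TFTransfoXLForSequenceClassification: the hand-written `inputs = input_processing(...)` block at the top of each `call` is removed, the `@unpack_inputs` decorator is stacked onto `call` instead, and the body then reads `input_ids`, `mems`, `head_mask` and friends as plain arguments rather than through `inputs["..."]` lookups. The sketch below only illustrates the idea behind such a decorator; the name `unpack_inputs_sketch` and the demo class are invented here, and the real `unpack_inputs` in `modeling_tf_utils` additionally handles legacy tuple/dict-style inputs, config-driven defaults and booleans in graph mode.

# A minimal sketch, assuming only the Python standard library.
import functools
import inspect


def unpack_inputs_sketch(call):
    """Resolve whatever the caller passed into explicit keyword arguments."""

    @functools.wraps(call)
    def wrapper(self, *args, **kwargs):
        bound = inspect.signature(call).bind(self, *args, **kwargs)
        bound.apply_defaults()
        resolved = dict(bound.arguments)
        resolved.pop("self")
        # A trailing **kwargs parameter shows up as a nested dict; merge it back
        # so unknown keys still reach the wrapped function's **kwargs.
        resolved.update(resolved.pop("kwargs", {}))
        return call(self, **resolved)

    return wrapper


class DemoLayer:
    # Hypothetical layer: the decorated body works with plain named arguments,
    # mirroring how the refactored call() methods below read theirs.
    @unpack_inputs_sketch
    def call(self, input_ids=None, mems=None, training=False, **kwargs):
        return input_ids, mems, training


assert DemoLayer().call([1, 2, 3], training=True) == ([1, 2, 3], None, True)

The payoff is visible throughout the diff: the per-method boilerplate disappears and every `inputs["..."]` access becomes a direct variable reference.
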
@@ -32,8 +32,8 @@ from ...modeling_tf_utils import (
     TFPreTrainedModel,
     TFSequenceClassificationLoss,
     get_initializer,
-    input_processing,
     keras_serializable,
+    unpack_inputs,
 )
 from ...tf_utils import shape_list
 from ...utils import logging
@@ -537,6 +537,7 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer):
         return new_mems

+    @unpack_inputs
     def call(
         self,
         input_ids=None,
@@ -549,52 +550,39 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer):
         training=False,
         **kwargs,
     ):
-        inputs = input_processing(
-            func=self.call,
-            config=self.config,
-            input_ids=input_ids,
-            mems=mems,
-            head_mask=head_mask,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-            kwargs_call=kwargs,
-        )

         # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
         # so we transpose here from shape [bsz, len] to shape [len, bsz]
-        if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
+        if input_ids is not None and inputs_embeds is not None:
             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
-        elif inputs["input_ids"] is not None:
-            inputs["input_ids"] = tf.transpose(inputs["input_ids"], perm=(1, 0))
-            qlen, bsz = shape_list(inputs["input_ids"])
-        elif inputs["inputs_embeds"] is not None:
-            inputs["inputs_embeds"] = tf.transpose(inputs["inputs_embeds"], perm=(1, 0, 2))
-            qlen, bsz = shape_list(inputs["inputs_embeds"])[:2]
+        elif input_ids is not None:
+            input_ids = tf.transpose(input_ids, perm=(1, 0))
+            qlen, bsz = shape_list(input_ids)
+        elif inputs_embeds is not None:
+            inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
+            qlen, bsz = shape_list(inputs_embeds)[:2]
         else:
             raise ValueError("You have to specify either input_ids or inputs_embeds")

-        if inputs["mems"] is None:
-            inputs["mems"] = self.init_mems(bsz)
+        if mems is None:
+            mems = self.init_mems(bsz)

         # Prepare head mask if needed
         # 1.0 in head_mask indicate we keep the head
         # attention_probs has shape bsz x n_heads x N x N
         # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
         # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
-        if inputs["head_mask"] is not None:
+        if head_mask is not None:
             raise NotImplementedError
         else:
-            inputs["head_mask"] = [None] * self.n_layer
+            head_mask = [None] * self.n_layer

-        if inputs["inputs_embeds"] is not None:
-            word_emb = inputs["inputs_embeds"]
+        if inputs_embeds is not None:
+            word_emb = inputs_embeds
         else:
-            word_emb = self.word_emb(inputs["input_ids"])
+            word_emb = self.word_emb(input_ids)

-        mlen = shape_list(inputs["mems"][0])[0] if inputs["mems"] is not None else 0
+        mlen = shape_list(mems[0])[0] if mems is not None else 0
         klen = mlen + qlen

         # Compute decoder attention mask
@@ -629,52 +617,52 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer):
             dec_attn_mask += tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, -mask_shift_len)

         hids = []
-        attentions = [] if inputs["output_attentions"] else None
+        attentions = [] if output_attentions else None
         if self.attn_type == 0:  # default
             pos_seq = tf.range(klen - 1, -1, -1.0)
             if self.clamp_len > 0:
                 pos_seq = tf.minimum(pos_seq, self.clamp_len)
             pos_emb = self.pos_emb(pos_seq)

-            core_out = self.drop(word_emb, training=inputs["training"])
-            pos_emb = self.drop(pos_emb, training=inputs["training"])
+            core_out = self.drop(word_emb, training=training)
+            pos_emb = self.drop(pos_emb, training=training)

             for i, layer in enumerate(self.layers):
                 hids.append(core_out)
-                mems_i = None if inputs["mems"] is None else inputs["mems"][i]
+                mems_i = None if mems is None else mems[i]
                 layer_outputs = layer(
                     core_out,
                     pos_emb,
                     dec_attn_mask,
                     mems_i,
-                    inputs["head_mask"][i],
-                    inputs["output_attentions"],
-                    training=inputs["training"],
+                    head_mask[i],
+                    output_attentions,
+                    training=training,
                 )
                 core_out = layer_outputs[0]
-                if inputs["output_attentions"]:
+                if output_attentions:
                     attentions.append(layer_outputs[1])
         else:  # learnable embeddings and absolute embeddings
             raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint

-        core_out = self.drop(core_out, training=inputs["training"])
+        core_out = self.drop(core_out, training=training)

-        new_mems = self._update_mems(hids, inputs["mems"], mlen, qlen)
+        new_mems = self._update_mems(hids, mems, mlen, qlen)

         # We transpose back here to shape [bsz, len, hidden_dim]
         core_out = tf.transpose(core_out, perm=(1, 0, 2))

-        if inputs["output_hidden_states"]:
+        if output_hidden_states:
             # Transpose to library standard shape [bsz, len, hidden_dim] and add last layer
             hids = tuple(tf.transpose(t, perm=(1, 0, 2)) for t in hids)
             hids = hids + (core_out,)
         else:
             hids = None

-        if inputs["output_attentions"]:
+        if output_attentions:
             # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
             attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)

-        if not inputs["return_dict"]:
+        if not return_dict:
             return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)

         return TFTransfoXLModelOutput(
@@ -890,6 +878,7 @@ class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
         super().__init__(config, *inputs, **kwargs)
         self.transformer = TFTransfoXLMainLayer(config, name="transformer")

+    @unpack_inputs
     @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
@@ -909,9 +898,7 @@ class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
         training=False,
         **kwargs,
     ):
-        inputs = input_processing(
-            func=self.call,
-            config=self.config,
+        outputs = self.transformer(
             input_ids=input_ids,
             mems=mems,
             head_mask=head_mask,
@@ -920,17 +907,6 @@ class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
             output_hidden_states=output_hidden_states,
             return_dict=return_dict,
             training=training,
-            kwargs_call=kwargs,
-        )
-        outputs = self.transformer(
-            input_ids=inputs["input_ids"],
-            mems=inputs["mems"],
-            head_mask=inputs["head_mask"],
-            inputs_embeds=inputs["inputs_embeds"],
-            output_attentions=inputs["output_attentions"],
-            output_hidden_states=inputs["output_hidden_states"],
-            return_dict=inputs["return_dict"],
-            training=inputs["training"],
         )

         return outputs
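
Because the decorator normalizes the inputs before `call` runs, the public calling convention of TFTransfoXLModel is untouched by this refactor. A quick usage sketch, assuming the transfo-xl-wt103 checkpoint purely for illustration:

from transformers import TFTransfoXLModel, TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
model = TFTransfoXLModel.from_pretrained("transfo-xl-wt103")

encoded = tokenizer("Hello, my dog is cute", return_tensors="tf")

# Keyword arguments are unpacked by the decorator before the method body executes.
outputs = model(input_ids=encoded["input_ids"], output_hidden_states=True)
print(outputs.last_hidden_state.shape)  # (batch, seq_len, d_model)
print(len(outputs.mems))                # one memory tensor per layer
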
@@ -982,6 +958,7 @@ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
     def init_mems(self, bsz):
         return self.transformer.init_mems(bsz)

+    @unpack_inputs
     @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
@@ -1002,42 +979,28 @@ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
         training=False,
         **kwargs,
     ):
-        inputs = input_processing(
-            func=self.call,
-            config=self.config,
-            input_ids=input_ids,
-            mems=mems,
-            head_mask=head_mask,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-            kwargs_call=kwargs,
-        )
-        if inputs["input_ids"] is not None:
-            bsz, tgt_len = shape_list(inputs["input_ids"])[:2]
+        if input_ids is not None:
+            bsz, tgt_len = shape_list(input_ids)[:2]
         else:
-            bsz, tgt_len = shape_list(inputs["inputs_embeds"])[:2]
+            bsz, tgt_len = shape_list(inputs_embeds)[:2]

         transformer_outputs = self.transformer(
-            inputs["input_ids"],
-            inputs["mems"],
-            inputs["head_mask"],
-            inputs["inputs_embeds"],
-            inputs["output_attentions"],
-            inputs["output_hidden_states"],
-            inputs["return_dict"],
-            training=inputs["training"],
+            input_ids,
+            mems,
+            head_mask,
+            inputs_embeds,
+            output_attentions,
+            output_hidden_states,
+            return_dict,
+            training=training,
         )

         last_hidden = transformer_outputs[0]
         pred_hid = last_hidden[:, -tgt_len:]

-        softmax_output = self.crit(pred_hid, labels, training=inputs["training"])
+        softmax_output = self.crit(pred_hid, labels, training=training)

-        if not inputs["return_dict"]:
+        if not return_dict:
             return (softmax_output,) + transformer_outputs[1:]

         return TFTransfoXLLMHeadModelOutput(
@@ -1063,10 +1026,9 @@ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
         # if past is defined in model kwargs then use it for faster decoding
         if past:
-            inputs["mems"] = past
-            inputs["input_ids"] = tf.expand_dims(input_ids[:, -1], axis=-1)
+            input_ids = tf.expand_dims(input_ids[:, -1], axis=-1)
         else:
-            inputs["input_ids"] = input_ids
+            input_ids = input_ids

         return inputs
@@ -1105,6 +1067,7 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc
     def get_output_embeddings(self):
         return self.transformer.word_emb

+    @unpack_inputs
     @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
@@ -1130,9 +1093,7 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc
             Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
             config.vocab_size - 1]`.
         """
-        inputs = input_processing(
-            func=self.call,
-            config=self.config,
+        transformer_outputs = self.transformer(
             input_ids=input_ids,
             mems=mems,
             head_mask=head_mask,
@@ -1140,20 +1101,7 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc
             output_attentions=output_attentions,
             output_hidden_states=output_hidden_states,
             return_dict=return_dict,
-            labels=labels,
             training=training,
-            kwargs_call=kwargs,
-        )
-        transformer_outputs = self.transformer(
-            input_ids=inputs["input_ids"],
-            mems=inputs["mems"],
-            head_mask=inputs["head_mask"],
-            inputs_embeds=inputs["inputs_embeds"],
-            output_attentions=inputs["output_attentions"],
-            output_hidden_states=inputs["output_hidden_states"],
-            return_dict=inputs["return_dict"],
-            training=inputs["training"],
         )

         hidden_states = transformer_outputs[0]
@@ -1162,12 +1110,12 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc
         if self.config.pad_token_id is None:
             sequence_lengths = -1
         else:
-            if inputs["input_ids"] is not None:
+            if input_ids is not None:
                 sequence_lengths = (
                     tf.reduce_sum(
                         tf.cast(
-                            tf.math.not_equal(inputs["input_ids"], self.config.pad_token_id),
-                            dtype=inputs["input_ids"].dtype,
+                            tf.math.not_equal(input_ids, self.config.pad_token_id),
+                            dtype=input_ids.dtype,
                         ),
                         -1,
                         keepdims=False,
@@ -1183,11 +1131,11 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc
                 )

         loss = None
-        if inputs["labels"] is not None:
+        if labels is not None:
             if input_ids is not None:
-                batch_size, sequence_length = shape_list(inputs["input_ids"])[:2]
+                batch_size, sequence_length = shape_list(input_ids)[:2]
             else:
-                batch_size, sequence_length = shape_list(inputs["inputs_embeds"])[:2]
+                batch_size, sequence_length = shape_list(inputs_embeds)[:2]
             assert (
                 self.config.pad_token_id is not None or batch_size == 1
             ), "Cannot handle batch sizes > 1 if no padding token is defined."
@@ -1195,13 +1143,11 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc
             if not tf.is_tensor(sequence_lengths):
                 in_logits = logits[0:batch_size, sequence_lengths]

-            loss = self.hf_compute_loss(
-                tf.reshape(inputs["labels"], [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])
-            )
+            loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))

         pooled_logits = in_logits if in_logits is not None else logits

-        if not inputs["return_dict"]:
+        if not return_dict:
             output = (pooled_logits,) + transformer_outputs[1:]
             return ((loss,) + output) if loss is not None else output
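
The sequence classification head follows the same pattern and its caller-facing behaviour should be unchanged. A minimal usage sketch, assuming the transfo-xl-wt103 checkpoint with a freshly initialised two-label head (both chosen here only for illustration):

import tensorflow as tf
from transformers import TFTransfoXLForSequenceClassification, TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
model = TFTransfoXLForSequenceClassification.from_pretrained("transfo-xl-wt103", num_labels=2)

encoded = tokenizer("Hello, my dog is cute", return_tensors="tf")

# Passing labels exercises the hf_compute_loss path touched in the last hunk above.
outputs = model(input_ids=encoded["input_ids"], labels=tf.constant([1]))
print(outputs.loss, outputs.logits.shape)
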