"git@developer.sourcefind.cn:dadigang/Ventoy.git" did not exist on "022322c8faf52d22af31617a36ca08efee63920a"
Unverified Commit dad5ca83 authored by Joao Gante, committed by GitHub

TF: Finalize `unpack_inputs`-related changes (#16499)

* Add `unpack_inputs` to remaining models

* Remove `**kwargs` from `call()` in TF models

* Fix TF T5 tests
parent be9474bd
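
For context, this is the pattern the PR finalizes: TF model `call()` methods used to accept a catch-all `**kwargs` and route everything through `input_processing`; with the `@unpack_inputs` decorator now normalizing the calling convention, the catch-all parameter can be dropped, which is why the diff below removes `**kwargs,` from signature after signature. A minimal sketch of the idea (a toy stand-in, not the actual `transformers.modeling_tf_utils.unpack_inputs` implementation):

```python
import functools

import tensorflow as tf


def unpack_inputs_sketch(call_fn):
    """Toy stand-in for transformers' @unpack_inputs: if the first
    positional argument is a dict (the Keras single-input calling
    convention), unpack it into keyword arguments before dispatching."""

    @functools.wraps(call_fn)
    def wrapper(self, *args, **kwargs):
        if args and isinstance(args[0], dict):
            kwargs = {**args[0], **kwargs}
            args = args[1:]
        return call_fn(self, *args, **kwargs)

    return wrapper


class TFToyLayer(tf.keras.layers.Layer):
    @unpack_inputs_sketch
    def call(self, input_ids=None, attention_mask=None, training=False):
        # With an explicit signature and no **kwargs catch-all,
        # unexpected arguments now raise a TypeError instead of being
        # silently swallowed.
        return input_ids


layer = TFToyLayer()
tokens = tf.constant([[1, 2, 3]])
print(layer({"input_ids": tokens}))  # dict calling convention still works
```
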
......@@ -706,7 +706,6 @@ class TFLayoutLMMainLayer(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
if input_ids is not None and inputs_embeds is not None:
......@@ -928,7 +927,6 @@ class TFLayoutLMModel(TFLayoutLMPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
r"""
Returns:
......@@ -1048,7 +1046,6 @@ class TFLayoutLMForMaskedLM(TFLayoutLMPreTrainedModel, TFMaskedLanguageModelingL
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1172,7 +1169,6 @@ class TFLayoutLMForSequenceClassification(TFLayoutLMPreTrainedModel, TFSequenceC
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......@@ -1303,7 +1299,6 @@ class TFLayoutLMForTokenClassification(TFLayoutLMPreTrainedModel, TFTokenClassif
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -1666,7 +1666,6 @@ class TFLEDEncoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
......@@ -1911,7 +1910,6 @@ class TFLEDDecoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
......@@ -2333,7 +2331,6 @@ class TFLEDForConditionalGeneration(TFLEDPreTrainedModel):
return_dict=None,
labels=None,
training=False,
**kwargs,
):
"""
Returns:
......@@ -2429,7 +2426,7 @@ class TFLEDForConditionalGeneration(TFLEDPreTrainedModel):
decoder_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
......
......@@ -1676,7 +1676,6 @@ class TFLongformerMainLayer(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
if input_ids is not None and inputs_embeds is not None:
......@@ -2023,7 +2022,6 @@ class TFLongformerModel(TFLongformerPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFLongformerBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
outputs = self.longformer(
......@@ -2100,7 +2098,6 @@ class TFLongformerForMaskedLM(TFLongformerPreTrainedModel, TFMaskedLanguageModel
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFLongformerMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -2194,7 +2191,6 @@ class TFLongformerForQuestionAnswering(TFLongformerPreTrainedModel, TFQuestionAn
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFLongformerQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -2340,7 +2336,6 @@ class TFLongformerForSequenceClassification(TFLongformerPreTrainedModel, TFSeque
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFLongformerSequenceClassifierOutput, Tuple[tf.Tensor]]:
if global_attention_mask is None and input_ids is not None:
......@@ -2450,7 +2445,6 @@ class TFLongformerForMultipleChoice(TFLongformerPreTrainedModel, TFMultipleChoic
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFLongformerMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -2580,7 +2574,6 @@ class TFLongformerForTokenClassification(TFLongformerPreTrainedModel, TFTokenCla
return_dict: Optional[bool] = None,
labels: Optional[Union[np.array, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFLongformerTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -685,7 +685,6 @@ class TFLxmertMainLayer(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
if input_ids is not None and inputs_embeds is not None:
......@@ -946,7 +945,6 @@ class TFLxmertModel(TFLxmertPreTrainedModel):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
outputs = self.lxmert(
input_ids,
......@@ -1282,7 +1280,6 @@ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -707,7 +707,6 @@ class TFMarianEncoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
......@@ -866,7 +865,6 @@ class TFMarianDecoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
......@@ -1296,7 +1294,6 @@ class TFMarianMTModel(TFMarianPreTrainedModel, TFCausalLanguageModelingLoss):
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -684,7 +684,6 @@ class TFMBartEncoder(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
"""
Args:
......@@ -848,7 +847,6 @@ class TFMBartDecoder(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[
TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]
]:
......@@ -1278,7 +1276,7 @@ class TFMBartForConditionalGeneration(TFMBartPreTrainedModel, TFCausalLanguageMo
decoder_head_mask: Optional[tf.Tensor] = None,
cross_attn_head_mask: Optional[tf.Tensor] = None,
encoder_outputs: Optional[TFBaseModelOutput] = None,
past_key_values: [Tuple[Tuple[tf.Tensor]]] = None,
past_key_values: Tuple[Tuple[tf.Tensor]] = None,
inputs_embeds: Optional[tf.Tensor] = None,
decoder_inputs_embeds: Optional[tf.Tensor] = None,
use_cache: Optional[bool] = None,
......@@ -1287,7 +1285,6 @@ class TFMBartForConditionalGeneration(TFMBartPreTrainedModel, TFCausalLanguageMo
return_dict: Optional[bool] = None,
labels: Optional[tf.Tensor] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
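
One small fix rides along in the MBart hunk above: `[Tuple[Tuple[tf.Tensor]]]` is a list literal containing a type, not a valid annotation, so it was changed to a bare `Tuple[...]`. A short illustration of the distinction (the `Optional` wrapper is my addition, reflecting the `None` default; it is not part of the diff):

```python
from typing import Optional, Tuple

import tensorflow as tf

# Valid: a typing construct; Optional[...] reflects the None default.
past_key_values: Optional[Tuple[Tuple[tf.Tensor]]] = None

# Invalid as a type: [Tuple[Tuple[tf.Tensor]]] evaluates to a
# one-element list object, which static checkers reject.
```
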
......@@ -692,7 +692,6 @@ class TFMobileBertMainLayer(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
......@@ -928,7 +927,6 @@ class TFMobileBertModel(TFMobileBertPreTrainedModel):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
outputs = self.mobilebert(
input_ids=input_ids,
......@@ -993,7 +991,6 @@ class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Return:
......@@ -1092,7 +1089,6 @@ class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModel
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1176,7 +1172,6 @@ class TFMobileBertForNextSentencePrediction(TFMobileBertPreTrainedModel, TFNextS
return_dict=None,
next_sentence_label=None,
training=False,
**kwargs,
):
r"""
Return:
......@@ -1287,7 +1282,6 @@ class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSeque
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -1381,7 +1375,6 @@ class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAn
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -1498,7 +1491,6 @@ class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoic
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -1626,7 +1618,6 @@ class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenCla
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -497,7 +497,6 @@ class TFMPNetMainLayer(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
if input_ids is not None and inputs_embeds is not None:
......@@ -686,7 +685,6 @@ class TFMPNetModel(TFMPNetPreTrainedModel):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
outputs = self.mpnet(
input_ids=input_ids,
......@@ -803,7 +801,6 @@ class TFMPNetForMaskedLM(TFMPNetPreTrainedModel, TFMaskedLanguageModelingLoss):
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -909,7 +906,6 @@ class TFMPNetForSequenceClassification(TFMPNetPreTrainedModel, TFSequenceClassif
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -1000,7 +996,6 @@ class TFMPNetForMultipleChoice(TFMPNetPreTrainedModel, TFMultipleChoiceLoss):
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -1112,7 +1107,6 @@ class TFMPNetForTokenClassification(TFMPNetPreTrainedModel, TFTokenClassificatio
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -249,7 +249,6 @@ class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
if input_ids is not None and inputs_embeds is not None:
......@@ -522,7 +521,6 @@ class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFBaseModelOutput]:
outputs = self.transformer(
......@@ -586,7 +584,6 @@ class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelin
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFCausalLMOutput]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -669,7 +666,6 @@ class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFOpenAIGPTDoubleHeadsModelOutput]:
r"""
mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
......@@ -813,7 +809,6 @@ class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenc
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFSequenceClassifierOutput]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -710,7 +710,6 @@ class TFPegasusEncoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
......@@ -872,7 +871,6 @@ class TFPegasusDecoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
......@@ -1305,7 +1303,6 @@ class TFPegasusForConditionalGeneration(TFPegasusPreTrainedModel, TFCausalLangua
return_dict=None,
labels=None,
training=False,
**kwargs,
):
"""
labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -660,7 +660,6 @@ class TFRemBertMainLayer(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
if not self.config.is_decoder:
......@@ -959,7 +958,6 @@ class TFRemBertModel(TFRemBertPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
r"""
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
......@@ -1060,7 +1058,6 @@ class TFRemBertForMaskedLM(TFRemBertPreTrainedModel, TFMaskedLanguageModelingLos
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1155,7 +1152,6 @@ class TFRemBertForCausalLM(TFRemBertPreTrainedModel, TFCausalLanguageModelingLos
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
r"""
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
......@@ -1283,7 +1279,6 @@ class TFRemBertForSequenceClassification(TFRemBertPreTrainedModel, TFSequenceCla
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......@@ -1374,7 +1369,6 @@ class TFRemBertForMultipleChoice(TFRemBertPreTrainedModel, TFMultipleChoiceLoss)
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......@@ -1494,7 +1488,6 @@ class TFRemBertForTokenClassification(TFRemBertPreTrainedModel, TFTokenClassific
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1575,7 +1568,6 @@ class TFRemBertForQuestionAnswering(TFRemBertPreTrainedModel, TFQuestionAnswerin
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......
......@@ -624,7 +624,6 @@ class TFRobertaMainLayer(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
if not self.config.is_decoder:
......@@ -936,7 +935,6 @@ class TFRobertaModel(TFRobertaPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]:
r"""
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
......@@ -1093,7 +1091,6 @@ class TFRobertaForMaskedLM(TFRobertaPreTrainedModel, TFMaskedLanguageModelingLos
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1196,7 +1193,6 @@ class TFRobertaForCausalLM(TFRobertaPreTrainedModel, TFCausalLanguageModelingLos
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
r"""
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
......@@ -1353,7 +1349,6 @@ class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel, TFSequenceCla
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -1449,7 +1444,6 @@ class TFRobertaForMultipleChoice(TFRobertaPreTrainedModel, TFMultipleChoiceLoss)
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......@@ -1567,7 +1561,6 @@ class TFRobertaForTokenClassification(TFRobertaPreTrainedModel, TFTokenClassific
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1655,7 +1648,6 @@ class TFRobertaForQuestionAnswering(TFRobertaPreTrainedModel, TFQuestionAnswerin
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
......
......@@ -614,7 +614,6 @@ class TFRoFormerMainLayer(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
if input_ids is not None and inputs_embeds is not None:
......@@ -817,7 +816,6 @@ class TFRoFormerModel(TFRoFormerPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
outputs = self.roformer(
input_ids=input_ids,
......@@ -877,7 +875,6 @@ class TFRoFormerForMaskedLM(TFRoFormerPreTrainedModel, TFMaskedLanguageModelingL
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -953,7 +950,6 @@ class TFRoFormerForCausalLM(TFRoFormerPreTrainedModel, TFCausalLanguageModelingL
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1064,7 +1060,6 @@ class TFRoFormerForSequenceClassification(TFRoFormerPreTrainedModel, TFSequenceC
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......@@ -1155,7 +1150,6 @@ class TFRoFormerForMultipleChoice(TFRoFormerPreTrainedModel, TFMultipleChoiceLos
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......@@ -1269,7 +1263,6 @@ class TFRoFormerForTokenClassification(TFRoFormerPreTrainedModel, TFTokenClassif
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1348,7 +1341,6 @@ class TFRoFormerForQuestionAnswering(TFRoFormerPreTrainedModel, TFQuestionAnswer
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......
......@@ -791,7 +791,6 @@ class TFSpeech2TextEncoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
......@@ -957,7 +956,6 @@ class TFSpeech2TextDecoder(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
......
......@@ -654,7 +654,6 @@ class TFT5MainLayer(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
) -> Tuple:
if input_ids is not None and inputs_embeds is not None:
......@@ -1152,7 +1151,6 @@ class TFT5Model(TFT5PreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFSeq2SeqModelOutput]:
r"""
Returns:
......@@ -1329,7 +1327,6 @@ class TFT5ForConditionalGeneration(TFT5PreTrainedModel, TFCausalLanguageModeling
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFSeq2SeqLMOutput]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1611,6 +1608,10 @@ class TFT5EncoderModel(TFT5PreTrainedModel):
encoder_config.use_cache = False
self.encoder = TFT5MainLayer(encoder_config, embed_tokens, name="encoder")
@property
def dummy_inputs(self):
return {"input_ids": tf.constant(DUMMY_INPUTS)}
def get_encoder(self):
return self.encoder
......@@ -1627,7 +1628,6 @@ class TFT5EncoderModel(TFT5PreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFBaseModelOutput]:
r"""
Returns:
......@@ -1670,6 +1670,19 @@ class TFT5EncoderModel(TFT5PreTrainedModel):
attentions=encoder_outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
# Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
......
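
The `dummy_inputs` override and the explicit `serving` signature added to `TFT5EncoderModel` above make the encoder-only model buildable and exportable without `decoder_input_ids`. A hedged usage sketch (the checkpoint name and export path are illustrative):

```python
import tensorflow as tf
from transformers import TFT5EncoderModel

model = TFT5EncoderModel.from_pretrained("t5-small")  # illustrative checkpoint

# The @tf.function input_signature above pins dtypes to int32 but leaves
# batch and sequence dimensions dynamic, so the exported SavedModel
# should accept any (batch, length) input_ids/attention_mask pair.
tf.saved_model.save(
    model,
    "t5_encoder_export",
    signatures={"serving_default": model.serving},
)
```
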
......@@ -770,7 +770,6 @@ class TFTapasMainLayer(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
if input_ids is not None and inputs_embeds is not None:
......@@ -980,7 +979,6 @@ class TFTapasModel(TFTapasPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
r"""
Returns:
......@@ -1067,7 +1065,6 @@ class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss):
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
......@@ -1285,7 +1282,6 @@ class TFTapasForQuestionAnswering(TFTapasPreTrainedModel):
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTableQuestionAnsweringOutput, Tuple[tf.Tensor]]:
r"""
table_mask (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
......@@ -1602,7 +1598,6 @@ class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassif
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
......
......@@ -550,7 +550,6 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
......@@ -898,7 +897,6 @@ class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
outputs = self.transformer(
input_ids=input_ids,
......@@ -979,7 +977,6 @@ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
return_dict=None,
labels=None,
training=False,
**kwargs,
):
if input_ids is not None:
bsz, tgt_len = shape_list(input_ids)[:2]
......@@ -1088,7 +1085,6 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[Tuple, TFTransfoXLSequenceClassifierOutputWithPast]:
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
......
......@@ -23,7 +23,7 @@ import tensorflow as tf
from ...configuration_utils import PretrainedConfig
from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, get_initializer, input_processing
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, get_initializer, unpack_inputs
from ...tf_utils import shape_list
from ...utils import (
DUMMY_INPUTS,
......@@ -510,6 +510,7 @@ class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLos
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
@unpack_inputs
@add_start_docstrings_to_model_forward(
VISION_ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length")
)
......@@ -585,21 +586,16 @@ class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLos
if encoder_outputs is None:
encoder_processing_inputs = {
"func": self.encoder.call,
"config": self.encoder.config,
encoder_inputs = {
"input_ids": pixel_values,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
"training": training,
"kwargs_call": {},
}
# Add arguments to encoder from `kwargs_encoder`
encoder_processing_inputs.update(kwargs_encoder)
encoder_inputs = input_processing(**encoder_processing_inputs)
encoder_inputs.update(kwargs_encoder)
if "input_ids" in encoder_inputs:
encoder_inputs["pixel_values"] = encoder_inputs.pop("input_ids")
......@@ -639,9 +635,7 @@ class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLos
batch_size, sequence_length = shape_list(encoder_hidden_states)[:2]
encoder_attention_mask = tf.ones(shape=(batch_size, sequence_length), dtype=tf.int32)
decoder_processing_inputs = {
"func": self.decoder.call,
"config": self.decoder.config,
decoder_inputs = {
"input_ids": decoder_input_ids,
"attention_mask": decoder_attention_mask,
"encoder_hidden_states": encoder_hidden_states,
......@@ -653,13 +647,11 @@ class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLos
"past_key_values": past_key_values,
"return_dict": return_dict,
"training": training,
"kwargs_call": {},
}
# Add arguments to decoder from `kwargs_decoder`
decoder_processing_inputs.update(kwargs_decoder)
decoder_inputs.update(kwargs_decoder)
decoder_inputs = input_processing(**decoder_processing_inputs)
decoder_outputs = self.decoder(**decoder_inputs)
logits = decoder_outputs[0]
......
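
The hunks above remove the `input_processing` indirection inside `TFVisionEncoderDecoderModel`: the wrapper now builds plain kwargs dicts and calls the sub-models directly, with `@unpack_inputs` handling normalization at the top-level `call()`. A condensed sketch of the resulting encoder path (illustrative, not the verbatim transformers code):

```python
def run_encoder(encoder, pixel_values, output_attentions=None,
                output_hidden_states=None, return_dict=None,
                training=False, kwargs_encoder=None):
    # Build a plain kwargs dict, as the diff now does, instead of
    # routing through input_processing.
    encoder_inputs = {
        "input_ids": pixel_values,
        "output_attentions": output_attentions,
        "output_hidden_states": output_hidden_states,
        "return_dict": return_dict,
        "training": training,
    }
    encoder_inputs.update(kwargs_encoder or {})  # caller-supplied extras win
    if "input_ids" in encoder_inputs:
        # Vision encoders take pixel_values, not input_ids.
        encoder_inputs["pixel_values"] = encoder_inputs.pop("input_ids")
    return encoder(**encoder_inputs)
```
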
......@@ -486,7 +486,6 @@ class TFViTMainLayer(tf.keras.layers.Layer):
interpolate_pos_encoding: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
if pixel_values is None:
......@@ -656,7 +655,6 @@ class TFViTModel(TFViTPreTrainedModel):
interpolate_pos_encoding: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
r"""
Returns:
......@@ -757,7 +755,6 @@ class TFViTForImageClassification(TFViTPreTrainedModel, TFSequenceClassification
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
......
......@@ -647,7 +647,6 @@ class TFViTMAEMainLayer(tf.keras.layers.Layer):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFViTMAEModelOutput, Tuple[tf.Tensor]]:
embedding_output, mask, ids_restore = self.embeddings(
pixel_values=pixel_values, training=training, noise=noise
......@@ -811,7 +810,6 @@ class TFViTMAEModel(TFViTMAEPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFViTMAEModelOutput, Tuple[tf.Tensor]]:
r"""
Returns:
......@@ -1028,7 +1026,6 @@ class TFViTMAEForPreTraining(TFViTMAEPreTrainedModel):
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFViTMAEForPreTrainingOutput, Tuple[tf.Tensor]]:
r"""
Returns:
......