"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "5e09af2acde21f232a6ed2ad2972c8f2269dcecf"
Unverified Commit bb5a2f2f authored by mollerup23, committed by GitHub

Adding type hints to call() functions in this file (#21548)



* Adding type hints to call() functions in this file

* make fixup

* Update src/transformers/models/marian/modeling_tf_marian.py

* Update src/transformers/models/marian/modeling_tf_marian.py

* Update src/transformers/models/marian/modeling_tf_marian.py

* Update src/transformers/models/marian/modeling_tf_marian.py

* Update src/transformers/models/marian/modeling_tf_marian.py

* Update src/transformers/models/marian/modeling_tf_marian.py

* Update src/transformers/models/marian/modeling_tf_marian.py

* Update src/transformers/models/marian/modeling_tf_marian.py

---------
Co-authored-by: Matt <rocketknight1@gmail.com>
Co-authored-by: Matt <Rocketknight1@users.noreply.github.com>
parent 78a53d59
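
The pattern applied throughout the diff below is the usual `typing` treatment for Keras-style `call()` signatures: tensor arguments defaulting to `None` become `Optional[tf.Tensor]`, boolean flags become `Optional[bool]`, and the decoder cache becomes `Optional[Tuple[Tuple[tf.Tensor]]]`. As a minimal self-contained sketch of that style (the class and its body here are illustrative, not part of the commit):

```python
from typing import Optional, Tuple

import tensorflow as tf


class ExampleLayer(tf.keras.layers.Layer):
    """Illustrative only: mirrors the annotation style this commit applies."""

    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[tf.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        training: bool = False,
    ) -> tf.Tensor:
        # The annotations are static hints; runtime behaviour is unchanged.
        if input_ids is None:
            raise ValueError("input_ids is required in this sketch")
        return tf.cast(input_ids, tf.float32)
```
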
--- a/src/transformers/models/marian/modeling_tf_marian.py
+++ b/src/transformers/models/marian/modeling_tf_marian.py
@@ -708,14 +708,14 @@ class TFMarianEncoder(tf.keras.layers.Layer):
     @unpack_inputs
     def call(
         self,
-        input_ids=None,
-        inputs_embeds=None,
-        attention_mask=None,
-        head_mask=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        input_ids: Optional[tf.Tensor] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: bool = False,
     ):
         """
         Args:
@@ -879,20 +879,20 @@ class TFMarianDecoder(tf.keras.layers.Layer):
     @unpack_inputs
     def call(
         self,
-        input_ids=None,
-        inputs_embeds=None,
-        attention_mask=None,
-        position_ids=None,
-        encoder_hidden_states=None,
-        encoder_attention_mask=None,
-        head_mask=None,
-        cross_attn_head_mask=None,
-        past_key_values=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        input_ids: Optional[tf.Tensor] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        position_ids: Optional[tf.Tensor] = None,
+        encoder_hidden_states: Optional[tf.Tensor] = None,
+        encoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
+        past_key_values: Optional[Tuple[Tuple[tf.Tensor]]] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: bool = False,
     ):
         r"""
         Args:
@@ -1115,23 +1115,23 @@ class TFMarianMainLayer(tf.keras.layers.Layer):
     @unpack_inputs
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        decoder_position_ids=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        decoder_position_ids: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        past_key_values: Tuple[Tuple[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: bool = False,
         **kwargs,
     ):
         if decoder_input_ids is None and decoder_inputs_embeds is None:
@@ -1220,23 +1220,23 @@ class TFMarianModel(TFMarianPreTrainedModel):
     )
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        decoder_position_ids=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        decoder_position_ids: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
+        encoder_outputs: Optional[tf.Tensor] = None,
+        past_key_values: Optional[Tuple[Tuple[tf.Tensor]]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: bool = False,
         **kwargs,
     ):
         outputs = self.model(
@@ -1348,24 +1348,24 @@ class TFMarianMTModel(TFMarianPreTrainedModel, TFCausalLanguageModelingLoss):
     @add_end_docstrings(MARIAN_GENERATION_EXAMPLE)
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        decoder_position_ids=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        decoder_position_ids: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[TFBaseModelOutput] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        labels=None,
-        training=False,
+        past_key_values: Optional[Tuple[Tuple[tf.Tensor]]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[tf.Tensor] = None,
+        training: bool = False,
     ):
         r"""
         labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
...
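
Since these are annotations only, existing call sites keep working unchanged. A quick smoke test of the annotated `TFMarianModel.call()` signature, as a sketch assuming `transformers` and TensorFlow are installed ("Helsinki-NLP/opus-mt-en-de" is just one publicly available Marian checkpoint, and reusing the encoder ids as decoder input is purely for illustration):

```python
import tensorflow as tf
from transformers import MarianTokenizer, TFMarianModel

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = TFMarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

batch = tokenizer("Type hints do not change runtime behaviour.", return_tensors="tf")
outputs = model(
    input_ids=batch["input_ids"],            # tf.Tensor, matches Optional[tf.Tensor]
    attention_mask=batch["attention_mask"],  # tf.Tensor, matches Optional[tf.Tensor]
    decoder_input_ids=batch["input_ids"],    # illustration only: reuse encoder ids
    training=False,                          # bool, matches training: bool = False
)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```
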