Unverified commit d4d23141, authored by IMvision12, committed by GitHub

remaining pytorch type hints (#20217)

* Update modeling_flava.py

* Update modeling_markuplm.py

* Update modeling_glpn.py

* Update modeling_roc_bert.py

* Update modeling_segformer.py

* Update modeling_tapas.py

* Update modeling_tapas.py

* Update modeling_tapas.py

* Update modeling_tapas.py

* Update modeling_trocr.py

* Update modeling_videomae.py

* Update modeling_videomae.py

* Update modeling_videomae.py

* Update modeling_yolos.py

* Update modeling_wav2vec2.py

* Update modeling_jukebox.py

* Update modeling_jukebox.py

* Update modeling_jukebox.py

* Update modeling_jukebox.py
parent 9ea1dbd2
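Every hunk below follows the same recipe: annotate each `forward` argument (usually `Optional[torch.LongTensor]`, `Optional[torch.FloatTensor]`, or `Optional[bool]`) and give the method a `Union[Tuple, ...Output]` return annotation. The following minimal sketch shows why the return type is a `Union` — these methods return a plain tuple when `return_dict=False` and a `ModelOutput`-style dataclass otherwise. `ToyModel` and `ToyModelOutput` are illustrative stand-ins, not transformers classes:

```python
# Minimal sketch of the annotation pattern this commit applies across models.
# ToyModel and ToyModelOutput are illustrative stand-ins, not transformers code.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
from torch import nn


@dataclass
class ToyModelOutput:
    """Stand-in for a transformers ModelOutput subclass."""

    logits: Optional[torch.FloatTensor] = None


class ToyModel(nn.Module):
    def __init__(self, hidden_size: int = 8):
        super().__init__()
        self.proj = nn.Linear(hidden_size, hidden_size)

    # Before this commit, signatures looked like:
    #     def forward(self, inputs_embeds=None, return_dict=None):
    # After it, every argument and the return value carry explicit hints.
    def forward(
        self,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], ToyModelOutput]:
        # Mirrors the config.use_return_dict fallback used in transformers.
        return_dict = True if return_dict is None else return_dict
        logits = self.proj(inputs_embeds)
        if not return_dict:
            return (logits,)  # the Tuple branch of the Union
        return ToyModelOutput(logits=logits)  # the dataclass branch


outputs = ToyModel()(torch.randn(2, 8), return_dict=False)
print(type(outputs))  # <class 'tuple'>
```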
modeling_flava.py
@@ -1795,7 +1795,7 @@ class FlavaForPreTraining(FlavaPreTrainedModel):
         output_hidden_states: bool = True,
         return_dict: Optional[bool] = None,
         return_loss: Optional[bool] = None,
-    ):
+    ) -> Union[Tuple[torch.Tensor], FlavaForPreTrainingOutput]:
         """
         Examples:
         ```python
modeling_glpn.py
@@ -698,12 +698,12 @@ class GLPNForDepthEstimation(GLPNPreTrainedModel):
     @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        pixel_values,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        pixel_values: torch.FloatTensor,
+        labels: Optional[torch.FloatTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
         r"""
         labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
             Ground truth depth estimation maps for computing the loss.
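To see both branches of the new GLPN return type at runtime, a sketch like the one below should work. It assumes transformers with PyTorch is installed; the model is built from a default config with random weights, so no checkpoint download is needed, and the input size is illustrative:

```python
# Sketch: exercising GLPNForDepthEstimation's newly annotated signature.
# GLPN is fully convolutional, so a small random input is enough to show
# the two return types.
import torch
from transformers import GLPNConfig, GLPNForDepthEstimation

model = GLPNForDepthEstimation(GLPNConfig()).eval()  # random weights
pixel_values = torch.randn(1, 3, 64, 64)  # (batch, channels, height, width)

with torch.no_grad():
    as_dataclass = model(pixel_values, return_dict=True)   # DepthEstimatorOutput
    as_tuple = model(pixel_values, return_dict=False)      # plain tuple

print(type(as_dataclass).__name__, as_dataclass.predicted_depth.shape)
print(type(as_tuple).__name__)
```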
modeling_jukebox.py
@@ -16,7 +16,7 @@
 import math
 import os
-from typing import List
+from typing import List, Optional, Tuple

 import numpy as np
 import torch

@@ -737,7 +737,7 @@ class JukeboxVQVAE(PreTrainedModel):
         ]
         return self.decode(music_tokens)

-    def forward(self, raw_audio):
+    def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]:
         """
         Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level.
         The commit loss, which ensure that the encoder's computed embeddings are close to the codebook vectors, is

@@ -748,7 +748,7 @@ class JukeboxVQVAE(PreTrainedModel):
                 Audio input which will be encoded and decoded.

         Returns:
-            `Tuple[torch.Tensor, torch.Tensor`
+            `Tuple[torch.Tensor, torch.Tensor]`

         Example:

@@ -2228,7 +2228,13 @@ class JukeboxPrior(PreTrainedModel):
         else:
             return loss, metrics

-    def forward(self, hidden_states, metadata=None, decode=False, get_preds=False):
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        metadata: Optional[List[torch.LongTensor]],
+        decode: Optional[bool] = False,
+        get_preds: Optional[bool] = False,
+    ) -> List[torch.Tensor]:
         """
         Encode the hidden states using the `vqvae` encoder, and then predicts the next token in the `forward_tokens`
         function. The loss is the sum of the `encoder` loss and the `decoder` loss.
modeling_markuplm.py
@@ -836,18 +836,18 @@ class MarkupLMModel(MarkupLMPreTrainedModel):
     @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        xpath_tags_seq=None,
-        xpath_subs_seq=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        xpath_tags_seq: Optional[torch.LongTensor] = None,
+        xpath_subs_seq: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
         r"""
         Returns:
modeling_roc_bert.py
@@ -1800,7 +1800,7 @@ class RoCBertForTokenClassification(RoCBertPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ):
+    ) -> Union[Tuple, TokenClassifierOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
modeling_segformer.py
@@ -706,7 +706,7 @@ class SegformerDecodeHead(SegformerPreTrainedModel):
         self.config = config

-    def forward(self, encoder_hidden_states: torch.FloatTensor):
+    def forward(self, encoder_hidden_states: torch.FloatTensor) -> torch.Tensor:
         batch_size = encoder_hidden_states[-1].shape[0]
         all_hidden_states = ()
modeling_tapas.py
@@ -19,7 +19,7 @@ import enum
 import math
 import os
 from dataclasses import dataclass
-from typing import Optional, Tuple
+from typing import Optional, Tuple, Union

 import torch
 import torch.utils.checkpoint

@@ -878,18 +878,18 @@ class TapasModel(TapasPreTrainedModel):
     @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        encoder_hidden_states=None,
-        encoder_attention_mask=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        encoder_attention_mask: Optional[torch.FloatTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, BaseModelOutputWithPooling]:
         r"""
         Returns:

@@ -1013,20 +1013,20 @@ class TapasForMaskedLM(TapasPreTrainedModel):
     @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        encoder_hidden_states=None,
-        encoder_attention_mask=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        encoder_attention_mask: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
         **kwargs
-    ):
+    ) -> Union[Tuple, MaskedLMOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,

@@ -1144,22 +1144,22 @@ class TapasForQuestionAnswering(TapasPreTrainedModel):
     @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        table_mask=None,
-        labels=None,
-        aggregation_labels=None,
-        float_answer=None,
-        numeric_values=None,
-        numeric_values_scale=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        table_mask: Optional[torch.LongTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        aggregation_labels: Optional[torch.LongTensor] = None,
+        float_answer: Optional[torch.FloatTensor] = None,
+        numeric_values: Optional[torch.FloatTensor] = None,
+        numeric_values_scale: Optional[torch.FloatTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, TableQuestionAnsweringOutput]:
         r"""
         table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
             Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and

@@ -1466,17 +1466,17 @@ class TapasForSequenceClassification(TapasPreTrainedModel):
     @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
modeling_trocr.py
@@ -18,7 +18,7 @@
 import copy
 import math
 import random
-from typing import Optional, Tuple
+from typing import Optional, Tuple, Union

 import torch
 from torch import nn

@@ -820,20 +820,20 @@ class TrOCRForCausalLM(TrOCRPreTrainedModel):
     @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        encoder_hidden_states=None,
-        encoder_attention_mask=None,
-        head_mask=None,
-        cross_attn_head_mask=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        labels=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        encoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
         r"""
         Args:
             input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
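The `past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]` hint describes a nested cache: one inner tuple per decoder layer, each holding that layer's cached key/value tensors. A sketch to see it at runtime; the tiny config values are illustrative, chosen only to keep the randomly initialized model small:

```python
# Sketch: inspecting the cache structure behind the new past_key_values hint.
import torch
from transformers import TrOCRConfig, TrOCRForCausalLM

config = TrOCRConfig(
    vocab_size=100,
    d_model=32,
    decoder_layers=2,
    decoder_attention_heads=2,
    decoder_ffn_dim=64,
)
model = TrOCRForCausalLM(config).eval()

with torch.no_grad():
    out = model(torch.tensor([[1, 2, 3]]), use_cache=True, return_dict=True)

cache = out.past_key_values
print(len(cache))         # one inner tuple per decoder layer -> 2
print(cache[0][0].shape)  # cached keys: (batch, heads, seq_len, head_dim)
```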
modeling_videomae.py
@@ -565,13 +565,13 @@ class VideoMAEModel(VideoMAEPreTrainedModel):
     @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        pixel_values,
-        bool_masked_pos=None,
-        head_mask=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        pixel_values: torch.FloatTensor,
+        bool_masked_pos: Optional[torch.BoolTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, BaseModelOutput]:
         r"""
         Returns:

@@ -753,13 +753,13 @@ class VideoMAEForPreTraining(VideoMAEPreTrainedModel):
     @replace_return_docstrings(output_type=VideoMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        pixel_values,
-        bool_masked_pos,
-        head_mask=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        pixel_values: torch.FloatTensor,
+        bool_masked_pos: torch.BoolTensor,
+        head_mask: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[tuple, VideoMAEForPreTrainingOutput]:
         r"""
         Returns:

@@ -926,7 +926,7 @@ class VideoMAEForVideoClassification(VideoMAEPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ):
+    ) -> Union[Tuple, ImageClassifierOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
             Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
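Note that `pixel_values` becomes a required `torch.FloatTensor` here, while `bool_masked_pos` stays optional on the base model and is required only for pre-training. A sketch exercising the base model's annotated signature; the tiny config is illustrative and the model is randomly initialized:

```python
# Sketch: calling VideoMAEModel with the newly annotated arguments.
# pixel_values is (batch, num_frames, channels, height, width).
import torch
from transformers import VideoMAEConfig, VideoMAEModel

config = VideoMAEConfig(
    image_size=16,
    patch_size=8,
    num_frames=2,
    tubelet_size=2,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
)
model = VideoMAEModel(config).eval()

video = torch.randn(1, 2, 3, 16, 16)  # the required pixel_values argument
with torch.no_grad():
    out = model(video, return_dict=True)  # bool_masked_pos left as None

# (num_frames / tubelet_size) * (image_size / patch_size) ** 2 = 4 patches
print(out.last_hidden_state.shape)  # expected: torch.Size([1, 4, 32])
```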
modeling_wav2vec2.py
@@ -1574,13 +1574,13 @@ class Wav2Vec2ForMaskedLM(Wav2Vec2PreTrainedModel):
     @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)
     def forward(
         self,
-        input_values,
-        attention_mask=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        labels=None,
-    ):
+        input_values: torch.FloatTensor,
+        attention_mask: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[torch.Tensor] = None,
+    ) -> Union[Tuple, MaskedLMOutput]:
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict

         outputs = self.wav2vec2(
modeling_yolos.py
@@ -641,7 +641,7 @@ class YolosModel(YolosPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ):
+    ) -> Union[Tuple, BaseModelOutputWithPooling]:
         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
         output_hidden_states = (
             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
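A quick way to confirm the hints actually land on the methods, using only the standard library. This assumes transformers (with PyTorch) is installed at a version that includes this commit; later releases may annotate these methods differently:

```python
# Sketch: reading the annotations this commit adds via typing.get_type_hints.
import typing

from transformers import YolosModel

hints = typing.get_type_hints(YolosModel.forward)
print(hints["return"])       # Union[Tuple, BaseModelOutputWithPooling]
print(hints["return_dict"])  # Optional[bool]
```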