Unverified Commit 16399d61 authored by Jack McDonald's avatar Jack McDonald Committed by GitHub
Browse files

Add type annotations for Perceiver (#16174)

parent 015de6f0
...@@ -19,7 +19,7 @@ import math ...@@ -19,7 +19,7 @@ import math
from dataclasses import dataclass from dataclasses import dataclass
from functools import reduce from functools import reduce
from operator import __add__ from operator import __add__
from typing import Any, Callable, Mapping, Optional, Tuple from typing import Any, Callable, Dict, Mapping, Optional, Tuple, Union
import numpy as np import numpy as np
import torch import torch
...@@ -986,15 +986,15 @@ class PerceiverForMaskedLM(PerceiverPreTrainedModel): ...@@ -986,15 +986,15 @@ class PerceiverForMaskedLM(PerceiverPreTrainedModel):
@replace_return_docstrings(output_type=PerceiverMaskedLMOutput, config_class=_CONFIG_FOR_DOC) @replace_return_docstrings(output_type=PerceiverMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward( def forward(
self, self,
inputs=None, inputs: Optional[torch.Tensor] = None,
attention_mask=None, attention_mask: Optional[torch.Tensor] = None,
head_mask=None, head_mask: Optional[torch.Tensor] = None,
output_attentions=None, output_attentions: Optional[bool] = None,
output_hidden_states=None, output_hidden_states: Optional[bool] = None,
labels=None, labels: Optional[torch.Tensor] = None,
return_dict=None, return_dict: Optional[bool] = None,
input_ids=None, input_ids: Optional[torch.Tensor] = None,
): ) -> Union[Tuple, PerceiverMaskedLMOutput]:
r""" r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
...@@ -1103,15 +1103,15 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel): ...@@ -1103,15 +1103,15 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
@replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward( def forward(
self, self,
inputs=None, inputs: Optional[torch.Tensor] = None,
attention_mask=None, attention_mask: Optional[torch.Tensor] = None,
head_mask=None, head_mask: Optional[torch.Tensor] = None,
output_attentions=None, output_attentions: Optional[bool] = None,
output_hidden_states=None, output_hidden_states: Optional[bool] = None,
labels=None, labels: Optional[torch.Tensor] = None,
return_dict=None, return_dict: Optional[bool] = None,
input_ids=None, input_ids: Optional[torch.Tensor] = None,
): ) -> Union[Tuple, PerceiverClassifierOutput]:
r""" r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels - Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels -
...@@ -1236,15 +1236,15 @@ class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel): ...@@ -1236,15 +1236,15 @@ class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
@replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward( def forward(
self, self,
inputs=None, inputs: Optional[torch.Tensor] = None,
attention_mask=None, attention_mask: Optional[torch.Tensor] = None,
head_mask=None, head_mask: Optional[torch.Tensor] = None,
output_attentions=None, output_attentions: Optional[bool] = None,
output_hidden_states=None, output_hidden_states: Optional[bool] = None,
labels=None, labels: Optional[torch.Tensor] = None,
return_dict=None, return_dict: Optional[bool] = None,
pixel_values=None, pixel_values: Optional[torch.Tensor] = None,
): ) -> Union[Tuple, PerceiverClassifierOutput]:
r""" r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ..., Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
...@@ -1373,15 +1373,15 @@ class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel): ...@@ -1373,15 +1373,15 @@ class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel):
@replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward( def forward(
self, self,
inputs=None, inputs: Optional[torch.Tensor] = None,
attention_mask=None, attention_mask: Optional[torch.Tensor] = None,
head_mask=None, head_mask: Optional[torch.Tensor] = None,
output_attentions=None, output_attentions: Optional[bool] = None,
output_hidden_states=None, output_hidden_states: Optional[bool] = None,
labels=None, labels: Optional[torch.Tensor] = None,
return_dict=None, return_dict: Optional[bool] = None,
pixel_values=None, pixel_values: Optional[torch.Tensor] = None,
): ) -> Union[Tuple, PerceiverClassifierOutput]:
r""" r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ..., Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
...@@ -1510,15 +1510,15 @@ class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel): ...@@ -1510,15 +1510,15 @@ class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel):
@replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward( def forward(
self, self,
inputs=None, inputs: Optional[torch.Tensor] = None,
attention_mask=None, attention_mask: Optional[torch.Tensor] = None,
head_mask=None, head_mask: Optional[torch.Tensor] = None,
output_attentions=None, output_attentions: Optional[bool] = None,
output_hidden_states=None, output_hidden_states: Optional[bool] = None,
labels=None, labels: Optional[torch.Tensor] = None,
return_dict=None, return_dict: Optional[bool] = None,
pixel_values=None, pixel_values: Optional[torch.Tensor] = None,
): ) -> Union[Tuple, PerceiverClassifierOutput]:
r""" r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ..., Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
...@@ -1664,14 +1664,14 @@ class PerceiverForOpticalFlow(PerceiverPreTrainedModel): ...@@ -1664,14 +1664,14 @@ class PerceiverForOpticalFlow(PerceiverPreTrainedModel):
@replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward( def forward(
self, self,
inputs=None, inputs: Optional[torch.Tensor] = None,
attention_mask=None, attention_mask: Optional[torch.Tensor] = None,
head_mask=None, head_mask: Optional[torch.Tensor] = None,
output_attentions=None, output_attentions: Optional[bool] = None,
output_hidden_states=None, output_hidden_states: Optional[bool] = None,
labels=None, labels: Optional[torch.Tensor] = None,
return_dict=None, return_dict: Optional[bool] = None,
): ) -> Union[Tuple, PerceiverClassifierOutput]:
r""" r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`. Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`.
...@@ -1873,15 +1873,15 @@ class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel): ...@@ -1873,15 +1873,15 @@ class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel):
@replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward( def forward(
self, self,
inputs=None, inputs: Optional[torch.Tensor] = None,
attention_mask=None, attention_mask: Optional[torch.Tensor] = None,
subsampled_output_points=None, subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None,
head_mask=None, head_mask: Optional[torch.Tensor] = None,
output_attentions=None, output_attentions: Optional[bool] = None,
output_hidden_states=None, output_hidden_states: Optional[bool] = None,
labels=None, labels: Optional[torch.Tensor] = None,
return_dict=None, return_dict: Optional[bool] = None,
): ) -> Union[Tuple, PerceiverClassifierOutput]:
r""" r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ..., Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment