"tests/vscode:/vscode.git/clone" did not exist on "b8f1cde931392551f74a9abef5d2724c3cbc2208"
Unverified Commit d0422de5 authored by Sylvain Gugger's avatar Sylvain Gugger Committed by GitHub
Browse files

Fix doc mistakes (#14874)

* Remove double returns

* Last fixes

* Quality

* Last fix for Lxmert
parent e846a56c
......@@ -823,12 +823,12 @@ LXMERT_INPUTS_DOCSTRING = r"""
details.
[What are input IDs?](../glossary#input-ids)
visual_feats: (`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_feat_dim)`):
visual_feats: (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
This input represents visual features. They are ROI-pooled object features from bounding boxes using a
faster-RCNN model.
These are currently not provided by the transformers library.
visual_pos: (`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_pos_dim)`):
visual_pos: (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
......@@ -1176,7 +1176,7 @@ class LxmertForPreTraining(LxmertPreTrainedModel):
- 0 indicates that the sentence does not match the image,
- 1 indicates that the sentence does match the image.
ans: (`Torch.Tensor` of shape `(batch_size)`, *optional*):
ans (`torch.Tensor` of shape `(batch_size)`, *optional*):
a one-hot representation of the correct answer *optional*
Returns:
......@@ -1397,8 +1397,6 @@ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
r"""
labels: (`torch.Tensor` of shape `(batch_size)`, *optional*):
A one-hot representation of the correct answer
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
......
......@@ -878,12 +878,12 @@ LXMERT_INPUTS_DOCSTRING = r"""
details.
[What are input IDs?](../glossary#input-ids)
visual_feats: (`tf.Tensor` of shape :obj:`(batch_size, num_visual_features, visual_feat_dim)`):
visual_feats: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
This input represents visual features. They are ROI-pooled object features from bounding boxes using a
faster-RCNN model.
These are currently not provided by the transformers library.
visual_pos: (`tf.Tensor` of shape :obj:`(batch_size, num_visual_features, visual_feat_dim)`):
visual_pos: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
......@@ -1327,7 +1327,7 @@ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
- 0 indicates that the sentence does not match the image,
- 1 indicates that the sentence does match the image.
ans: (`Torch.Tensor` of shape `(batch_size)`, *optional*, defaults to :obj: *None*):
ans (`torch.Tensor` of shape `(batch_size)`, *optional*, defaults to `None`):
a one-hot representation of the correct answer *optional*
Returns:
......
......@@ -30,7 +30,6 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
......@@ -1087,12 +1086,7 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
self.post_init()
@add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=PerceiverClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
@replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
inputs=None,
......
......@@ -25,10 +25,10 @@ import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_tensorflow_probability_available,
replace_return_docstrings,
requires_backends,
)
from ...modeling_tf_outputs import (
......@@ -981,12 +981,7 @@ class TFTapasModel(TFTapasPreTrainedModel):
self.tapas = TFTapasMainLayer(config, name="tapas")
@add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
@replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
......@@ -1085,12 +1080,7 @@ class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss):
return self.lm_head.predictions
@add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
@replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
......@@ -1314,12 +1304,7 @@ class TFTapasForQuestionAnswering(TFTapasPreTrainedModel):
self.config = config
@add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTableQuestionAnsweringOutput,
config_class=_CONFIG_FOR_DOC,
)
@replace_return_docstrings(output_type=TFTableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
......@@ -1664,12 +1649,7 @@ class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassif
)
@add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
@replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
......
......@@ -716,6 +716,7 @@ class VisualBertModel(VisualBertPreTrainedModel):
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment