Unverified Commit 123b597f authored by Gunjan Chhablani, committed by GitHub

Fix examples (#11990)

parent 88ca6a23
@@ -728,6 +728,9 @@ class VisualBertModel(VisualBertPreTrainedModel):
return_dict=None,
):
r"""
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image.
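The example is truncated at this point in the hunk. As a hedged sketch, not the literal patch content, a usage example for ``VisualBertModel`` along these lines would look roughly like the following, assuming ``get_visual_embeddings(image)`` is a user-supplied helper returning features of shape ``(batch_size, visual_seq_length, visual_embedding_dim)`` and assuming the ``uclanlp/visualbert-vqa-coco-pre`` checkpoint:

>>> from transformers import BertTokenizer, VisualBertModel
>>> import torch

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")

>>> inputs = tokenizer("The cat is on the mat.", return_tensors="pt")
>>> visual_embeds = get_visual_embeddings(image)  # user-supplied detector features (assumption)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)

>>> inputs.update({
...     "visual_embeds": visual_embeds,
...     "visual_token_type_ids": visual_token_type_ids,
...     "visual_attention_mask": visual_attention_mask,
... })

>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state  # (batch_size, text_seq_length + visual_seq_length, hidden_size)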
@@ -1016,6 +1019,7 @@ class VisualBertForMultipleChoice(VisualBertPreTrainedModel):
@add_start_docstrings_to_model_forward(
VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
@@ -1039,6 +1043,8 @@ class VisualBertForMultipleChoice(VisualBertPreTrainedModel):
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors.
(See :obj:`input_ids` above)
Returns:
Example::
>>> from transformers import BertTokenizer, VisualBertForMultipleChoice
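The rest of this example is collapsed in the diff view. Continuing from that import, a hedged sketch of how the multiple-choice case is typically wired up (assuming the ``uclanlp/visualbert-vcr`` checkpoint, two answer choices, and the same user-supplied ``get_visual_embeddings`` helper, here returning ``(visual_seq_length, visual_embedding_dim)`` features):

>>> import torch

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr")

>>> prompt = "What is the person in the picture doing?"
>>> choice0 = "They are riding a horse."
>>> choice1 = "They are playing chess."

>>> # Tokenize (prompt, choice) pairs, then add the choice dimension: (batch_size=1, num_choices=2, seq_length)
>>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
>>> inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}

>>> visual_embeds = get_visual_embeddings(image)  # assumed shape (visual_seq_length, visual_embedding_dim)
>>> visual_embeds = visual_embeds.expand(1, 2, *visual_embeds.shape)  # repeat the same image features per choice
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)

>>> labels = torch.tensor(0).unsqueeze(0)  # choice0 is the correct one (illustrative)

>>> inputs.update({
...     "visual_embeds": visual_embeds,
...     "visual_token_type_ids": visual_token_type_ids,
...     "visual_attention_mask": visual_attention_mask,
...     "labels": labels,
... })
>>> outputs = model(**inputs)
>>> loss, logits = outputs.loss, outputs.logits  # logits: (batch_size, num_choices)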
@@ -1160,6 +1166,7 @@ class VisualBertForQuestionAnswering(VisualBertPreTrainedModel):
self.init_weights()
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
@@ -1182,6 +1189,7 @@ class VisualBertForQuestionAnswering(VisualBertPreTrainedModel):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. A KLDivLoss is computed between the labels and the returned logits.
Returns:
Example::
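The example body is collapsed here. A hedged sketch of a VQA-style usage, assuming the ``uclanlp/visualbert-vqa`` checkpoint and the same user-supplied ``get_visual_embeddings`` helper; the ``(batch_size, config.num_labels)`` soft score target is an assumption consistent with the KLDivLoss described above:

>>> from transformers import BertTokenizer, VisualBertForQuestionAnswering
>>> import torch

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")

>>> inputs = tokenizer("Who is eating the apple?", return_tensors="pt")
>>> visual_embeds = get_visual_embeddings(image)  # assumed shape (1, visual_seq_length, visual_embedding_dim)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)

>>> inputs.update({
...     "visual_embeds": visual_embeds,
...     "visual_token_type_ids": visual_token_type_ids,
...     "visual_attention_mask": visual_attention_mask,
... })

>>> # Soft target over the answer vocabulary; here the answer at index 0 gets full weight (illustrative only)
>>> labels = torch.zeros(1, model.config.num_labels)
>>> labels[0, 0] = 1.0

>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> predicted_answer_idx = outputs.logits.argmax(-1)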
@@ -1280,6 +1288,7 @@ class VisualBertForVisualReasoning(VisualBertPreTrainedModel):
self.init_weights()
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
@@ -1302,6 +1311,8 @@ class VisualBertForVisualReasoning(VisualBertPreTrainedModel):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. A classification loss is computed (Cross-Entropy) against these labels.
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.
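Again the example is cut off by the collapsed hunk. A hedged sketch for the visual reasoning head, assuming the ``uclanlp/visualbert-nlvr2`` checkpoint, a binary true/false label, and the same user-supplied helper:

>>> from transformers import BertTokenizer, VisualBertForVisualReasoning
>>> import torch

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2")

>>> inputs = tokenizer("The left image contains two dogs.", return_tensors="pt")
>>> visual_embeds = get_visual_embeddings(image)  # assumed shape (1, visual_seq_length, visual_embedding_dim)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)

>>> inputs.update({
...     "visual_embeds": visual_embeds,
...     "visual_token_type_ids": visual_token_type_ids,
...     "visual_attention_mask": visual_attention_mask,
... })

>>> labels = torch.tensor(1).unsqueeze(0)  # the statement is true for this image (illustrative)

>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss      # cross-entropy against the class label, as described above
>>> logits = outputs.logits  # (batch_size, config.num_labels)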
@@ -1433,6 +1444,7 @@ class VisualBertForRegionToPhraseAlignment(VisualBertPreTrainedModel):
self.init_weights()
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
@@ -1459,6 +1471,8 @@ class VisualBertForRegionToPhraseAlignment(VisualBertPreTrainedModel):
Labels for computing the masked language modeling loss. KLDivLoss is computed against these labels and
the outputs from the attention layer.
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.
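The remainder of this example is not shown in the hunk. A hedged sketch of the region-to-phrase alignment usage; the ``region_to_phrase_position`` argument, the ``(batch_size, total_sequence_length, visual_sequence_length)`` label tensor for the KLDivLoss, and the ``uclanlp/visualbert-vqa-coco-pre`` checkpoint are assumptions, and the target values below are illustrative only:

>>> from transformers import BertTokenizer, VisualBertForRegionToPhraseAlignment
>>> import torch

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = VisualBertForRegionToPhraseAlignment.from_pretrained("uclanlp/visualbert-vqa-coco-pre")

>>> inputs = tokenizer("A man with a hat is holding a dog.", return_tensors="pt")
>>> visual_embeds = get_visual_embeddings(image)  # assumed shape (1, visual_seq_length, visual_embedding_dim)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)

>>> text_len = inputs["input_ids"].shape[-1]
>>> visual_len = visual_embeds.shape[-2]
>>> total_len = text_len + visual_len

>>> # Position, in the concatenated text+visual sequence, of each phrase to align with the regions
>>> region_to_phrase_position = torch.zeros(1, total_len, dtype=torch.long)
>>> # Soft alignment targets between every position and every visual region (KLDivLoss target)
>>> labels = torch.ones(1, total_len, visual_len, dtype=torch.float)

>>> inputs.update({
...     "visual_embeds": visual_embeds,
...     "visual_token_type_ids": visual_token_type_ids,
...     "visual_attention_mask": visual_attention_mask,
...     "region_to_phrase_position": region_to_phrase_position,
...     "labels": labels,
... })

>>> outputs = model(**inputs)
>>> loss = outputs.loss
>>> scores = outputs.logits  # alignment scores over the visual regions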