Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
d0422de5
Unverified
Commit
d0422de5
authored
Dec 21, 2021
by
Sylvain Gugger
Committed by
GitHub
Dec 21, 2021
Browse files
Fix doc mistakes (#14874)
* Remove double returns * Last fixes * Quality * Last fix for Lxmert
parent
e846a56c
Changes
5
Hide whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
13 additions
and
40 deletions
+13
-40
src/transformers/models/lxmert/modeling_lxmert.py
src/transformers/models/lxmert/modeling_lxmert.py
+3
-5
src/transformers/models/lxmert/modeling_tf_lxmert.py
src/transformers/models/lxmert/modeling_tf_lxmert.py
+3
-3
src/transformers/models/perceiver/modeling_perceiver.py
src/transformers/models/perceiver/modeling_perceiver.py
+1
-7
src/transformers/models/tapas/modeling_tf_tapas.py
src/transformers/models/tapas/modeling_tf_tapas.py
+5
-25
src/transformers/models/visual_bert/modeling_visual_bert.py
src/transformers/models/visual_bert/modeling_visual_bert.py
+1
-0
No files found.
src/transformers/models/lxmert/modeling_lxmert.py
View file @
d0422de5
...
@@ -823,12 +823,12 @@ LXMERT_INPUTS_DOCSTRING = r"""
...
@@ -823,12 +823,12 @@ LXMERT_INPUTS_DOCSTRING = r"""
details.
details.
[What are input IDs?](../glossary#input-ids)
[What are input IDs?](../glossary#input-ids)
visual_feats: (`torch.FloatTensor` of shape
:obj:`
(batch_size, num_visual_features, visual_feat_dim)
`
):
visual_feats: (`torch.FloatTensor` of shape
`
(batch_size, num_visual_features, visual_feat_dim)
`
):
This input represents visual features. They are ROI-pooled object features from bounding boxes using a
This input represents visual features. They are ROI-pooled object features from bounding boxes using a
faster-RCNN model)
faster-RCNN model)
These are currently not provided by the transformers library.
These are currently not provided by the transformers library.
visual_pos: (`torch.FloatTensor` of shape
:obj:`
(batch_size, num_visual_features, visual_pos_dim)
`
):
visual_pos: (`torch.FloatTensor` of shape
`
(batch_size, num_visual_features, visual_pos_dim)
`
):
This input represents spatial features corresponding to their relative (via index) visual features. The
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
1.
...
@@ -1176,7 +1176,7 @@ class LxmertForPreTraining(LxmertPreTrainedModel):
...
@@ -1176,7 +1176,7 @@ class LxmertForPreTraining(LxmertPreTrainedModel):
- 0 indicates that the sentence does not match the image,
- 0 indicates that the sentence does not match the image,
- 1 indicates that the sentence does match the image.
- 1 indicates that the sentence does match the image.
ans
:
(`Torch.Tensor` of shape `(batch_size)`, *optional*):
ans (`Torch.Tensor` of shape `(batch_size)`, *optional*):
a one-hot representation of the correct answer *optional*
a one-hot representation of the correct answer *optional*
Returns:
Returns:
...
@@ -1397,8 +1397,6 @@ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
...
@@ -1397,8 +1397,6 @@ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
r
"""
r
"""
labels: (`Torch.Tensor` of shape `(batch_size)`, *optional*):
labels: (`Torch.Tensor` of shape `(batch_size)`, *optional*):
A one-hot representation of the correct answer
A one-hot representation of the correct answer
Returns:
"""
"""
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
...
...
src/transformers/models/lxmert/modeling_tf_lxmert.py
View file @
d0422de5
...
@@ -878,12 +878,12 @@ LXMERT_INPUTS_DOCSTRING = r"""
...
@@ -878,12 +878,12 @@ LXMERT_INPUTS_DOCSTRING = r"""
details.
details.
[What are input IDs?](../glossary#input-ids)
[What are input IDs?](../glossary#input-ids)
visual_feats: (`tf.Tensor` of shape
:obj:`
(batch_size, num_visual_features, visual_feat_dim)
`
):
visual_feats: (`tf.Tensor` of shape
`
(batch_size, num_visual_features, visual_feat_dim)
`
):
This input represents visual features. They are ROI-pooled object features from bounding boxes using a
This input represents visual features. They are ROI-pooled object features from bounding boxes using a
faster-RCNN model)
faster-RCNN model)
These are currently not provided by the transformers library.
These are currently not provided by the transformers library.
visual_pos: (`tf.Tensor` of shape
:obj:`
(batch_size, num_visual_features, visual_feat_dim)
`
):
visual_pos: (`tf.Tensor` of shape
`
(batch_size, num_visual_features, visual_feat_dim)
`
):
This input represents spatial features corresponding to their relative (via index) visual features. The
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
1.
...
@@ -1327,7 +1327,7 @@ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
...
@@ -1327,7 +1327,7 @@ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
- 0 indicates that the sentence does not match the image,
- 0 indicates that the sentence does not match the image,
- 1 indicates that the sentence does match the image.
- 1 indicates that the sentence does match the image.
ans
:
(`Torch.Tensor` of shape `(batch_size)`, *optional*, defaults to :obj: *None*):
ans (`Torch.Tensor` of shape `(batch_size)`, *optional*, defaults to :obj: *None*):
a one-hot representation of the correct answer *optional*
a one-hot representation of the correct answer *optional*
Returns:
Returns:
...
...
src/transformers/models/perceiver/modeling_perceiver.py
View file @
d0422de5
...
@@ -30,7 +30,6 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
...
@@ -30,7 +30,6 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from
...activations
import
ACT2FN
from
...activations
import
ACT2FN
from
...file_utils
import
(
from
...file_utils
import
(
ModelOutput
,
ModelOutput
,
add_code_sample_docstrings
,
add_start_docstrings
,
add_start_docstrings
,
add_start_docstrings_to_model_forward
,
add_start_docstrings_to_model_forward
,
replace_return_docstrings
,
replace_return_docstrings
,
...
@@ -1087,12 +1086,7 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
...
@@ -1087,12 +1086,7 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
self
.
post_init
()
self
.
post_init
()
@
add_start_docstrings_to_model_forward
(
PERCEIVER_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_start_docstrings_to_model_forward
(
PERCEIVER_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
@
replace_return_docstrings
(
output_type
=
PerceiverClassifierOutput
,
config_class
=
_CONFIG_FOR_DOC
)
processor_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
PerceiverClassifierOutput
,
config_class
=
_CONFIG_FOR_DOC
,
)
def
forward
(
def
forward
(
self
,
self
,
inputs
=
None
,
inputs
=
None
,
...
...
src/transformers/models/tapas/modeling_tf_tapas.py
View file @
d0422de5
...
@@ -25,10 +25,10 @@ import tensorflow as tf
...
@@ -25,10 +25,10 @@ import tensorflow as tf
from
...activations_tf
import
get_tf_activation
from
...activations_tf
import
get_tf_activation
from
...file_utils
import
(
from
...file_utils
import
(
ModelOutput
,
ModelOutput
,
add_code_sample_docstrings
,
add_start_docstrings
,
add_start_docstrings
,
add_start_docstrings_to_model_forward
,
add_start_docstrings_to_model_forward
,
is_tensorflow_probability_available
,
is_tensorflow_probability_available
,
replace_return_docstrings
,
requires_backends
,
requires_backends
,
)
)
from
...modeling_tf_outputs
import
(
from
...modeling_tf_outputs
import
(
...
@@ -981,12 +981,7 @@ class TFTapasModel(TFTapasPreTrainedModel):
...
@@ -981,12 +981,7 @@ class TFTapasModel(TFTapasPreTrainedModel):
self
.
tapas
=
TFTapasMainLayer
(
config
,
name
=
"tapas"
)
self
.
tapas
=
TFTapasMainLayer
(
config
,
name
=
"tapas"
)
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
@
replace_return_docstrings
(
output_type
=
TFBaseModelOutputWithPooling
,
config_class
=
_CONFIG_FOR_DOC
)
processor_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFBaseModelOutputWithPooling
,
config_class
=
_CONFIG_FOR_DOC
,
)
def
call
(
def
call
(
self
,
self
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
...
@@ -1085,12 +1080,7 @@ class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss):
...
@@ -1085,12 +1080,7 @@ class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss):
return
self
.
lm_head
.
predictions
return
self
.
lm_head
.
predictions
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
@
replace_return_docstrings
(
output_type
=
TFMaskedLMOutput
,
config_class
=
_CONFIG_FOR_DOC
)
processor_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFMaskedLMOutput
,
config_class
=
_CONFIG_FOR_DOC
,
)
def
call
(
def
call
(
self
,
self
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
...
@@ -1314,12 +1304,7 @@ class TFTapasForQuestionAnswering(TFTapasPreTrainedModel):
...
@@ -1314,12 +1304,7 @@ class TFTapasForQuestionAnswering(TFTapasPreTrainedModel):
self
.
config
=
config
self
.
config
=
config
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
@
replace_return_docstrings
(
output_type
=
TFTableQuestionAnsweringOutput
,
config_class
=
_CONFIG_FOR_DOC
)
processor_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFTableQuestionAnsweringOutput
,
config_class
=
_CONFIG_FOR_DOC
,
)
def
call
(
def
call
(
self
,
self
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
...
@@ -1664,12 +1649,7 @@ class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassif
...
@@ -1664,12 +1649,7 @@ class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassif
)
)
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, num_choices, sequence_length"
))
@
add_start_docstrings_to_model_forward
(
TAPAS_INPUTS_DOCSTRING
.
format
(
"batch_size, num_choices, sequence_length"
))
@
add_code_sample_docstrings
(
@
replace_return_docstrings
(
output_type
=
TFSequenceClassifierOutput
,
config_class
=
_CONFIG_FOR_DOC
)
processor_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFSequenceClassifierOutput
,
config_class
=
_CONFIG_FOR_DOC
,
)
def
call
(
def
call
(
self
,
self
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
input_ids
:
Optional
[
TFModelInputType
]
=
None
,
...
...
src/transformers/models/visual_bert/modeling_visual_bert.py
View file @
d0422de5
...
@@ -716,6 +716,7 @@ class VisualBertModel(VisualBertPreTrainedModel):
...
@@ -716,6 +716,7 @@ class VisualBertModel(VisualBertPreTrainedModel):
self
.
encoder
.
layer
[
layer
].
attention
.
prune_heads
(
heads
)
self
.
encoder
.
layer
[
layer
].
attention
.
prune_heads
(
heads
)
@
add_start_docstrings_to_model_forward
(
VISUAL_BERT_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_start_docstrings_to_model_forward
(
VISUAL_BERT_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
replace_return_docstrings
(
output_type
=
BaseModelOutputWithPooling
,
config_class
=
_CONFIG_FOR_DOC
)
def
forward
(
def
forward
(
self
,
self
,
input_ids
=
None
,
input_ids
=
None
,
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment