chenpangpang/transformers · Commit f5af8736 (unverified)
Authored Oct 16, 2021 by Patrick von Platen; committed by GitHub on Oct 16, 2021
[Docs] More general docstrings (#14028)
* up
* finish
* up
* up
* finish
Parent: 47489a69
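Every hunk in this commit makes the same one-line change: inside each add_code_sample_docstrings(...) decorator, the tokenizer_class keyword is renamed to the more general processor_class, so the injected usage samples can advertise feature extractors and other processors as well as tokenizers. For orientation, here is a minimal sketch of how such a docstring-injecting decorator can work; it is an illustration written for this page, not the actual transformers implementation, and every name in it is assumed:

    # Illustrative sketch only -- not the real add_code_sample_docstrings.
    # It shows why a generic processor_class kwarg is enough: the rendered
    # sample needs *some* preprocessing class, not specifically a tokenizer.
    def add_code_sample_docstrings_sketch(*, processor_class, checkpoint, output_type, config_class):
        sample = (
            "\n    Example::\n\n"
            f"        >>> processor = {processor_class}.from_pretrained('{checkpoint}')\n"
            f"        >>> # the forward pass returns a {output_type} (see {config_class})\n"
        )

        def decorator(fn):
            # Append the rendered sample to the wrapped method's docstring.
            fn.__doc__ = (fn.__doc__ or "") + sample
            return fn

        return decorator

    # Hypothetical usage, mirroring the call sites changed below:
    @add_code_sample_docstrings_sketch(
        processor_class="LongformerTokenizer",
        checkpoint="allenai/longformer-base-4096",
        output_type="TFLongformerMaskedLMOutput",
        config_class="LongformerConfig",
    )
    def call(input_ids=None):
        """Forward pass."""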
Changes: 74 files in the commit overall; this page shows 20 changed files with 82 additions and 82 deletions (+82 -82).
src/transformers/models/longformer/modeling_tf_longformer.py        +5 -5
src/transformers/models/lxmert/modeling_lxmert.py                   +2 -2
src/transformers/models/lxmert/modeling_tf_lxmert.py                +1 -1
src/transformers/models/m2m_100/modeling_m2m_100.py                 +1 -1
src/transformers/models/marian/modeling_tf_marian.py                +1 -1
src/transformers/models/mbart/modeling_mbart.py                     +3 -3
src/transformers/models/mbart/modeling_tf_mbart.py                  +1 -1
src/transformers/models/megatron_bert/modeling_megatron_bert.py     +6 -6
src/transformers/models/mobilebert/modeling_mobilebert.py           +6 -6
src/transformers/models/mobilebert/modeling_tf_mobilebert.py        +6 -6
src/transformers/models/mpnet/modeling_mpnet.py                     +6 -6
src/transformers/models/mpnet/modeling_tf_mpnet.py                  +6 -6
src/transformers/models/openai/modeling_openai.py                   +3 -3
src/transformers/models/openai/modeling_tf_openai.py                +3 -3
src/transformers/models/pegasus/modeling_tf_pegasus.py              +1 -1
src/transformers/models/reformer/modeling_reformer.py               +5 -5
src/transformers/models/rembert/modeling_rembert.py                 +6 -6
src/transformers/models/rembert/modeling_tf_rembert.py              +7 -7
src/transformers/models/roberta/modeling_roberta.py                 +6 -6
src/transformers/models/roberta/modeling_tf_roberta.py              +7 -7
src/transformers/models/longformer/modeling_tf_longformer.py

@@ -2088,7 +2088,7 @@ class TFLongformerForMaskedLM(TFLongformerPreTrainedModel, TFMaskedLanguageModelingLoss):
     @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFLongformerMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2197,7 +2197,7 @@ class TFLongformerForQuestionAnswering(TFLongformerPreTrainedModel, TFQuestionAnsweringLoss):
     @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="allenai/longformer-large-4096-finetuned-triviaqa",
         output_type=TFLongformerQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2366,7 +2366,7 @@ class TFLongformerForSequenceClassification(TFLongformerPreTrainedModel, TFSequenceClassificationLoss):
     @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFLongformerSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2492,7 +2492,7 @@ class TFLongformerForMultipleChoice(TFLongformerPreTrainedModel, TFMultipleChoiceLoss):
         LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
     )
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFLongformerMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2645,7 +2645,7 @@ class TFLongformerForTokenClassification(TFLongformerPreTrainedModel, TFTokenClassificationLoss):
     @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFLongformerTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
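Note that the question-answering hunk above pins a task-specific checkpoint, "allenai/longformer-large-4096-finetuned-triviaqa", instead of the module-level _CHECKPOINT_FOR_DOC. A minimal usage sketch for that checkpoint follows; it uses standard transformers API calls but is written for this page and is not part of the commit:

    import tensorflow as tf
    from transformers import LongformerTokenizer, TFLongformerForQuestionAnswering

    ckpt = "allenai/longformer-large-4096-finetuned-triviaqa"
    tokenizer = LongformerTokenizer.from_pretrained(ckpt)
    model = TFLongformerForQuestionAnswering.from_pretrained(ckpt)

    question = "Who wrote Hamlet?"
    context = "Hamlet is a tragedy written by William Shakespeare."
    inputs = tokenizer(question, context, return_tensors="tf")
    outputs = model(inputs)  # a TFLongformerQuestionAnsweringModelOutput

    # Decode the highest-scoring answer span from the start/end logits.
    start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    answer = tokenizer.decode(inputs["input_ids"][0][start : end + 1])
    print(answer)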
src/transformers/models/lxmert/modeling_lxmert.py

@@ -901,7 +901,7 @@ class LxmertModel(LxmertPreTrainedModel):
     @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=LxmertModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1384,7 +1384,7 @@ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
     @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=LxmertForQuestionAnsweringOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/lxmert/modeling_tf_lxmert.py

@@ -950,7 +950,7 @@ class TFLxmertModel(TFLxmertPreTrainedModel):
     @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFLxmertModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/m2m_100/modeling_m2m_100.py

@@ -1123,7 +1123,7 @@ class M2M100Model(M2M100PreTrainedModel):
     @add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/marian/modeling_tf_marian.py

@@ -1224,7 +1224,7 @@ class TFMarianModel(TFMarianPreTrainedModel):
     @add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSeq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/mbart/modeling_mbart.py

@@ -1134,7 +1134,7 @@ class MBartModel(MBartPreTrainedModel):
     @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1405,7 +1405,7 @@ class MBartForSequenceClassification(MBartPreTrainedModel):
     @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1518,7 +1518,7 @@ class MBartForQuestionAnswering(MBartPreTrainedModel):
     @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/mbart/modeling_tf_mbart.py

@@ -1208,7 +1208,7 @@ class TFMBartModel(TFMBartPreTrainedModel):
     @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSeq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/megatron_bert/modeling_megatron_bert.py

@@ -873,7 +873,7 @@ class MegatronBertModel(MegatronBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPoolingAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -1282,7 +1282,7 @@ class MegatronBertForMaskedLM(MegatronBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1480,7 +1480,7 @@ class MegatronBertForSequenceClassification(MegatronBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1566,7 +1566,7 @@ class MegatronBertForMultipleChoice(MegatronBertPreTrainedModel):
         MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
     )
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1661,7 +1661,7 @@ class MegatronBertForTokenClassification(MegatronBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1751,7 +1751,7 @@ class MegatronBertForQuestionAnswering(MegatronBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/mobilebert/modeling_mobilebert.py

@@ -817,7 +817,7 @@ class MobileBertModel(MobileBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPooling,
         config_class=_CONFIG_FOR_DOC,

@@ -1032,7 +1032,7 @@ class MobileBertForMaskedLM(MobileBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1222,7 +1222,7 @@ class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1322,7 +1322,7 @@ class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1427,7 +1427,7 @@ class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
         MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
     )
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1526,7 +1526,7 @@ class MobileBertForTokenClassification(MobileBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/mobilebert/modeling_tf_mobilebert.py

@@ -933,7 +933,7 @@ class TFMobileBertModel(TFMobileBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutputWithPooling,
         config_class=_CONFIG_FOR_DOC,

@@ -1124,7 +1124,7 @@ class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModelingLoss):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1348,7 +1348,7 @@ class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSequenceClassificationLoss):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1456,7 +1456,7 @@ class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAnsweringLoss):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1591,7 +1591,7 @@ class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoiceLoss):
         MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
     )
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1742,7 +1742,7 @@ class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenClassificationLoss):
     @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/mpnet/modeling_mpnet.py

@@ -511,7 +511,7 @@ class MPNetModel(MPNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPooling,
         config_class=_CONFIG_FOR_DOC,

@@ -593,7 +593,7 @@ class MPNetForMaskedLM(MPNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -695,7 +695,7 @@ class MPNetForSequenceClassification(MPNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -777,7 +777,7 @@ class MPNetForMultipleChoice(MPNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -869,7 +869,7 @@ class MPNetForTokenClassification(MPNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -977,7 +977,7 @@ class MPNetForQuestionAnswering(MPNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/mpnet/modeling_tf_mpnet.py

@@ -684,7 +684,7 @@ class TFMPNetModel(TFMPNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -813,7 +813,7 @@ class TFMPNetForMaskedLM(TFMPNetPreTrainedModel, TFMaskedLanguageModelingLoss):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -934,7 +934,7 @@ class TFMPNetForSequenceClassification(TFMPNetPreTrainedModel, TFSequenceClassificationLoss):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1040,7 +1040,7 @@ class TFMPNetForMultipleChoice(TFMPNetPreTrainedModel, TFMultipleChoiceLoss):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1172,7 +1172,7 @@ class TFMPNetForTokenClassification(TFMPNetPreTrainedModel, TFTokenClassificationLoss):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1271,7 +1271,7 @@ class TFMPNetForQuestionAnswering(TFMPNetPreTrainedModel, TFQuestionAnsweringLoss):
     @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/openai/modeling_openai.py

@@ -433,7 +433,7 @@ class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
     @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -552,7 +552,7 @@ class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
     @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=CausalLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -756,7 +756,7 @@ class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel):
     @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/openai/modeling_tf_openai.py

@@ -522,7 +522,7 @@ class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
     @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -598,7 +598,7 @@ class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss):
     @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFCausalLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -856,7 +856,7 @@ class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenceClassificationLoss):
     @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/pegasus/modeling_tf_pegasus.py

@@ -1233,7 +1233,7 @@ class TFPegasusModel(TFPegasusPreTrainedModel):
     @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSeq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/reformer/modeling_reformer.py

@@ -1992,7 +1992,7 @@ class ReformerModel(ReformerPreTrainedModel):
     @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=ReformerModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2198,7 +2198,7 @@ class ReformerModelWithLMHead(ReformerPreTrainedModel):
     @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=CausalLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2313,7 +2313,7 @@ class ReformerForMaskedLM(ReformerPreTrainedModel):
     @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2394,7 +2394,7 @@ class ReformerForSequenceClassification(ReformerPreTrainedModel):
     @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2512,7 +2512,7 @@ class ReformerForQuestionAnswering(ReformerPreTrainedModel):
     @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/rembert/modeling_rembert.py

@@ -781,7 +781,7 @@ class RemBertModel(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=BaseModelOutputWithPastAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -933,7 +933,7 @@ class RemBertForMaskedLM(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1175,7 +1175,7 @@ class RemBertForSequenceClassification(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1259,7 +1259,7 @@ class RemBertForMultipleChoice(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1351,7 +1351,7 @@ class RemBertForTokenClassification(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1439,7 +1439,7 @@ class RemBertForQuestionAnswering(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/rembert/modeling_tf_rembert.py

@@ -956,7 +956,7 @@ class TFRemBertModel(TFRemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -1078,7 +1078,7 @@ class TFRemBertForMaskedLM(TFRemBertPreTrainedModel, TFMaskedLanguageModelingLoss):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1186,7 +1186,7 @@ class TFRemBertForCausalLM(TFRemBertPreTrainedModel, TFCausalLanguageModelingLoss):
         }

     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TFCausalLMOutputWithCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -1329,7 +1329,7 @@ class TFRemBertForSequenceClassification(TFRemBertPreTrainedModel, TFSequenceClassificationLoss):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1435,7 +1435,7 @@ class TFRemBertForMultipleChoice(TFRemBertPreTrainedModel, TFMultipleChoiceLoss):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1579,7 +1579,7 @@ class TFRemBertForTokenClassification(TFRemBertPreTrainedModel, TFTokenClassificationLoss):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1675,7 +1675,7 @@ class TFRemBertForQuestionAnswering(TFRemBertPreTrainedModel, TFQuestionAnsweringLoss):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="rembert",
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/roberta/modeling_roberta.py

@@ -739,7 +739,7 @@ class RobertaModel(RobertaPreTrainedModel):
     @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPoolingAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -1058,7 +1058,7 @@ class RobertaForMaskedLM(RobertaPreTrainedModel):
     @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1171,7 +1171,7 @@ class RobertaForSequenceClassification(RobertaPreTrainedModel):
     @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1267,7 +1267,7 @@ class RobertaForMultipleChoice(RobertaPreTrainedModel):
     @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1364,7 +1364,7 @@ class RobertaForTokenClassification(RobertaPreTrainedModel):
     @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1476,7 +1476,7 @@ class RobertaForQuestionAnswering(RobertaPreTrainedModel):
     @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
...
src/transformers/models/roberta/modeling_tf_roberta.py
View file @
f5af8736
...
...
@@ -933,7 +933,7 @@ class TFRobertaModel(TFRobertaPreTrainedModel):
@
add_start_docstrings_to_model_forward
(
ROBERTA_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
tokenize
r_class
=
_TOKENIZER_FOR_DOC
,
processo
r_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFBaseModelOutputWithPoolingAndCrossAttentions
,
config_class
=
_CONFIG_FOR_DOC
,
...
...
@@ -1108,7 +1108,7 @@ class TFRobertaForMaskedLM(TFRobertaPreTrainedModel, TFMaskedLanguageModelingLos
@
add_start_docstrings_to_model_forward
(
ROBERTA_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
tokenize
r_class
=
_TOKENIZER_FOR_DOC
,
processo
r_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFMaskedLMOutput
,
config_class
=
_CONFIG_FOR_DOC
,
...
...
@@ -1222,7 +1222,7 @@ class TFRobertaForCausalLM(TFRobertaPreTrainedModel, TFCausalLanguageModelingLos
@
add_start_docstrings_to_model_forward
(
ROBERTA_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
tokenize
r_class
=
_TOKENIZER_FOR_DOC
,
processo
r_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFCausalLMOutputWithCrossAttentions
,
config_class
=
_CONFIG_FOR_DOC
,
...
...
@@ -1392,7 +1392,7 @@ class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel, TFSequenceCla
@
add_start_docstrings_to_model_forward
(
ROBERTA_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
tokenize
r_class
=
_TOKENIZER_FOR_DOC
,
processo
r_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFSequenceClassifierOutput
,
config_class
=
_CONFIG_FOR_DOC
,
...
...
@@ -1503,7 +1503,7 @@ class TFRobertaForMultipleChoice(TFRobertaPreTrainedModel, TFMultipleChoiceLoss)
@
add_start_docstrings_to_model_forward
(
ROBERTA_INPUTS_DOCSTRING
.
format
(
"batch_size, num_choices, sequence_length"
))
@
add_code_sample_docstrings
(
tokenize
r_class
=
_TOKENIZER_FOR_DOC
,
processo
r_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFMultipleChoiceModelOutput
,
config_class
=
_CONFIG_FOR_DOC
,
...
...
@@ -1641,7 +1641,7 @@ class TFRobertaForTokenClassification(TFRobertaPreTrainedModel, TFTokenClassific
@
add_start_docstrings_to_model_forward
(
ROBERTA_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
tokenize
r_class
=
_TOKENIZER_FOR_DOC
,
processo
r_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFTokenClassifierOutput
,
config_class
=
_CONFIG_FOR_DOC
,
...
...
@@ -1742,7 +1742,7 @@ class TFRobertaForQuestionAnswering(TFRobertaPreTrainedModel, TFQuestionAnswerin
@
add_start_docstrings_to_model_forward
(
ROBERTA_INPUTS_DOCSTRING
.
format
(
"batch_size, sequence_length"
))
@
add_code_sample_docstrings
(
tokenize
r_class
=
_TOKENIZER_FOR_DOC
,
processo
r_class
=
_TOKENIZER_FOR_DOC
,
checkpoint
=
_CHECKPOINT_FOR_DOC
,
output_type
=
TFQuestionAnsweringModelOutput
,
config_class
=
_CONFIG_FOR_DOC
,
...
...