chenpangpang / transformers · Commits

Unverified commit f5af8736, authored Oct 16, 2021 by Patrick von Platen, committed by GitHub on Oct 16, 2021.

[Docs] More general docstrings (#14028)

* up * finish * up * up * finish

Parent commit: 47489a69
Changes: 74 · Showing 20 changed files with 83 additions and 83 deletions (+83 / -83) on this page.
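Every hunk shown on this page makes the same one-line edit: the add_code_sample_docstrings decorator on a model's forward method is now called with the more general processor_class keyword instead of tokenizer_class, while the value stays the per-file _TOKENIZER_FOR_DOC constant. As a rough, self-contained sketch of why that keyword exists at all -- illustrative only, not the transformers implementation, and the model/checkpoint names below are made up -- such a decorator renders a usage example into the decorated method's docstring:

# Illustrative sketch only, NOT the transformers implementation. It mimics how a
# docstring decorator of this kind turns (processor_class, checkpoint, output_type,
# config_class) into an example block appended to the forward method's documentation.
def add_code_sample_docstrings_sketch(*, processor_class, checkpoint, output_type, config_class):
    example = (
        "\n    Example:\n\n"
        f"        >>> from transformers import {processor_class}\n"
        f'        >>> processor = {processor_class}.from_pretrained("{checkpoint}")\n'
        f"        >>> # ... the forward pass returns a {output_type} (config: {config_class})\n"
    )

    def decorator(fn):
        fn.__doc__ = (fn.__doc__ or "") + example  # append the rendered sample
        return fn

    return decorator


class DemoForSequenceClassification:
    # Hypothetical literals; the real call sites pass the module-level
    # _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC and _CONFIG_FOR_DOC constants.
    @add_code_sample_docstrings_sketch(
        processor_class="DebertaV2Tokenizer",
        checkpoint="microsoft/deberta-v2-xlarge",
        output_type="SequenceClassifierOutput",
        config_class="DebertaV2Config",
    )
    def forward(self, input_ids=None):
        """Forward pass."""
        return input_ids


print(DemoForSequenceClassification.forward.__doc__)

The commit title ("More general docstrings") suggests the rename lets the same machinery describe feature extractors and processors as well as tokenizers; on this page it only changes the keyword at each call site.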
Files changed on this page:

src/transformers/models/deberta_v2/modeling_deberta_v2.py      +5 -5
src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py   +5 -5
src/transformers/models/distilbert/modeling_distilbert.py      +5 -5
src/transformers/models/distilbert/modeling_tf_distilbert.py   +6 -6
src/transformers/models/electra/modeling_electra.py            +6 -6
src/transformers/models/electra/modeling_tf_electra.py         +6 -6
src/transformers/models/flaubert/modeling_flaubert.py          +1 -1
src/transformers/models/flaubert/modeling_tf_flaubert.py       +2 -2
src/transformers/models/fnet/modeling_fnet.py                  +6 -6
src/transformers/models/fsmt/modeling_fsmt.py                  +1 -1
src/transformers/models/funnel/modeling_funnel.py               +7 -7
src/transformers/models/funnel/modeling_tf_funnel.py            +7 -7
src/transformers/models/gpt2/modeling_gpt2.py                  +4 -4
src/transformers/models/gpt2/modeling_tf_gpt2.py               +3 -3
src/transformers/models/gpt_neo/modeling_gpt_neo.py            +3 -3
src/transformers/models/gptj/modeling_gptj.py                  +3 -3
src/transformers/models/ibert/modeling_ibert.py                +6 -6
src/transformers/models/led/modeling_led.py                    +3 -3
src/transformers/models/led/modeling_tf_led.py                 +1 -1
src/transformers/models/longformer/modeling_longformer.py      +3 -3
src/transformers/models/deberta_v2/modeling_deberta_v2.py

@@ -974,7 +974,7 @@ class DebertaV2Model(DebertaV2PreTrainedModel):
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1081,7 +1081,7 @@ class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1222,7 +1222,7 @@ class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1318,7 +1318,7 @@ class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1406,7 +1406,7 @@ class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py

@@ -1223,7 +1223,7 @@ class TFDebertaV2Model(TFDebertaV2PreTrainedModel):
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1296,7 +1296,7 @@ class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelin
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1399,7 +1399,7 @@ class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenc
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1497,7 +1497,7 @@ class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClass
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1591,7 +1591,7 @@ class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsw
     @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/distilbert/modeling_distilbert.py

@@ -508,7 +508,7 @@ class DistilBertModel(DistilBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -604,7 +604,7 @@ class DistilBertForMaskedLM(DistilBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -702,7 +702,7 @@ class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -818,7 +818,7 @@ class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -935,7 +935,7 @@ class DistilBertForTokenClassification(DistilBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/distilbert/modeling_tf_distilbert.py

@@ -543,7 +543,7 @@ class TFDistilBertModel(TFDistilBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -658,7 +658,7 @@ class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModel
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -759,7 +759,7 @@ class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSeque
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -854,7 +854,7 @@ class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenCla
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -962,7 +962,7 @@ class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoic
         DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
     )
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1088,7 +1088,7 @@ class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAn
     @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/electra/modeling_electra.py

@@ -833,7 +833,7 @@ class ElectraModel(ElectraPreTrainedModel):
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -941,7 +941,7 @@ class ElectraForSequenceClassification(ElectraPreTrainedModel):
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1136,7 +1136,7 @@ class ElectraForMaskedLM(ElectraPreTrainedModel):
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1218,7 +1218,7 @@ class ElectraForTokenClassification(ElectraPreTrainedModel):
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1307,7 +1307,7 @@ class ElectraForQuestionAnswering(ElectraPreTrainedModel):
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1408,7 +1408,7 @@ class ElectraForMultipleChoice(ElectraPreTrainedModel):
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/electra/modeling_tf_electra.py

@@ -950,7 +950,7 @@ class TFElectraModel(TFElectraPreTrainedModel):
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutputWithPastAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -1211,7 +1211,7 @@ class TFElectraForMaskedLM(TFElectraPreTrainedModel, TFMaskedLanguageModelingLos
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1336,7 +1336,7 @@ class TFElectraForSequenceClassification(TFElectraPreTrainedModel, TFSequenceCla
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1444,7 +1444,7 @@ class TFElectraForMultipleChoice(TFElectraPreTrainedModel, TFMultipleChoiceLoss)
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1584,7 +1584,7 @@ class TFElectraForTokenClassification(TFElectraPreTrainedModel, TFTokenClassific
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1681,7 +1681,7 @@ class TFElectraForQuestionAnswering(TFElectraPreTrainedModel, TFQuestionAnswerin
     @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/flaubert/modeling_flaubert.py

@@ -148,7 +148,7 @@ class FlaubertModel(XLMModel):
     @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/flaubert/modeling_tf_flaubert.py

@@ -236,7 +236,7 @@ class TFFlaubertModel(TFFlaubertPreTrainedModel):
     @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -820,7 +820,7 @@ class TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel):
     @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFFlaubertWithLMHeadModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/fnet/modeling_fnet.py

@@ -545,7 +545,7 @@ class FNetModel(FNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -733,7 +733,7 @@ class FNetForMaskedLM(FNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -889,7 +889,7 @@ class FNetForSequenceClassification(FNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -961,7 +961,7 @@ class FNetForMultipleChoice(FNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1042,7 +1042,7 @@ class FNetForTokenClassification(FNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1111,7 +1111,7 @@ class FNetForQuestionAnswering(FNetPreTrainedModel):
     @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/fsmt/modeling_fsmt.py

@@ -1007,7 +1007,7 @@ class FSMTModel(PretrainedFSMTModel):
     @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/funnel/modeling_funnel.py

@@ -910,7 +910,7 @@ class FunnelBaseModel(FunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small-base",
         output_type=BaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -987,7 +987,7 @@ class FunnelModel(FunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1174,7 +1174,7 @@ class FunnelForMaskedLM(FunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1248,7 +1248,7 @@ class FunnelForSequenceClassification(FunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small-base",
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1338,7 +1338,7 @@ class FunnelForMultipleChoice(FunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small-base",
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1424,7 +1424,7 @@ class FunnelForTokenClassification(FunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1506,7 +1506,7 @@ class FunnelForQuestionAnswering(FunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/funnel/modeling_tf_funnel.py

@@ -1126,7 +1126,7 @@ class TFFunnelBaseModel(TFFunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small-base",
         output_type=TFBaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1187,7 +1187,7 @@ class TFFunnelModel(TFFunnelPreTrainedModel):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small",
         output_type=TFBaseModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1337,7 +1337,7 @@ class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, TFMaskedLanguageModelingLoss)
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small",
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1426,7 +1426,7 @@ class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClass
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small-base",
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1525,7 +1525,7 @@ class TFFunnelForMultipleChoice(TFFunnelPreTrainedModel, TFMultipleChoiceLoss):
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small-base",
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1655,7 +1655,7 @@ class TFFunnelForTokenClassification(TFFunnelPreTrainedModel, TFTokenClassificat
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small",
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1747,7 +1747,7 @@ class TFFunnelForQuestionAnswering(TFFunnelPreTrainedModel, TFQuestionAnsweringL
     @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="funnel-transformer/small",
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/gpt2/modeling_gpt2.py

@@ -732,7 +732,7 @@ class GPT2Model(GPT2PreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPastAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -1009,7 +1009,7 @@ class GPT2LMHeadModel(GPT2PreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=CausalLMOutputWithCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -1338,7 +1338,7 @@ class GPT2ForSequenceClassification(GPT2PreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="microsoft/DialogRPT-updown",
         output_type=SequenceClassifierOutputWithPast,
         config_class=_CONFIG_FOR_DOC,

@@ -1457,7 +1457,7 @@ class GPT2ForTokenClassification(GPT2PreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="microsoft/DialogRPT-updown",
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/gpt2/modeling_tf_gpt2.py

@@ -587,7 +587,7 @@ class TFGPT2Model(TFGPT2PreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutputWithPast,
         config_class=_CONFIG_FOR_DOC,

@@ -679,7 +679,7 @@ class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss):
     @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFCausalLMOutputWithPast,
         config_class=_CONFIG_FOR_DOC,

@@ -959,7 +959,7 @@ class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassific
     @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint="microsoft/DialogRPT-updown",
         output_type=TFSequenceClassifierOutputWithPast,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/gpt_neo/modeling_gpt_neo.py

@@ -497,7 +497,7 @@ class GPTNeoModel(GPTNeoPreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPastAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -713,7 +713,7 @@ class GPTNeoForCausalLM(GPTNeoPreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=CausalLMOutputWithCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -827,7 +827,7 @@ class GPTNeoForSequenceClassification(GPTNeoPreTrainedModel):
     @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutputWithPast,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/gptj/modeling_gptj.py

@@ -491,7 +491,7 @@ class GPTJModel(GPTJPreTrainedModel):
     @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPast,
         config_class=_CONFIG_FOR_DOC,

@@ -743,7 +743,7 @@ class GPTJForCausalLM(GPTJPreTrainedModel):
     @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=CausalLMOutputWithPast,
         config_class=_CONFIG_FOR_DOC,

@@ -864,7 +864,7 @@ class GPTJForSequenceClassification(GPTJPreTrainedModel):
     @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutputWithPast,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/ibert/modeling_ibert.py

@@ -772,7 +772,7 @@ class IBertModel(IBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPoolingAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,

@@ -875,7 +875,7 @@ class IBertForMaskedLM(IBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -983,7 +983,7 @@ class IBertForSequenceClassification(IBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1066,7 +1066,7 @@ class IBertForMultipleChoice(IBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1160,7 +1160,7 @@ class IBertForTokenClassification(IBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -1269,7 +1269,7 @@ class IBertForQuestionAnswering(IBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/led/modeling_led.py

@@ -2174,7 +2174,7 @@ class LEDModel(LEDPreTrainedModel):
     @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2468,7 +2468,7 @@ class LEDForSequenceClassification(LEDPreTrainedModel):
     @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2578,7 +2578,7 @@ class LEDForQuestionAnswering(LEDPreTrainedModel):
     @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/led/modeling_tf_led.py

@@ -2230,7 +2230,7 @@ class TFLEDModel(TFLEDPreTrainedModel):
     @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFLEDSeq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
src/transformers/models/longformer/modeling_longformer.py

@@ -1822,7 +1822,7 @@ class LongformerForSequenceClassification(LongformerPreTrainedModel):
     @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=LongformerSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2084,7 +2084,7 @@ class LongformerForTokenClassification(LongformerPreTrainedModel):
     @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=LongformerTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,

@@ -2176,7 +2176,7 @@ class LongformerForMultipleChoice(LongformerPreTrainedModel):
         LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
     )
     @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=LongformerMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,
Diff page 1 of 4.