chenpangpang/transformers, commit f5af8736 (unverified)
Authored Oct 16, 2021 by Patrick von Platen; committed by GitHub on Oct 16, 2021

[Docs] More general docstrings (#14028)

* up
* finish
* up
* up
* finish

Parent: 47489a69
Changes: 74 files in this commit. This page (1 of 4) shows 20 changed files with 209 additions and 120 deletions (+209 / -120).
src/transformers/file_utils.py (+134 / -45)
src/transformers/modeling_flax_utils.py (+1 / -1)
src/transformers/models/albert/modeling_albert.py (+6 / -6)
src/transformers/models/albert/modeling_tf_albert.py (+6 / -6)
src/transformers/models/bart/modeling_bart.py (+3 / -3)
src/transformers/models/bart/modeling_tf_bart.py (+1 / -1)
src/transformers/models/bert/modeling_bert.py (+6 / -6)
src/transformers/models/bert/modeling_tf_bert.py (+7 / -7)
src/transformers/models/bert_generation/modeling_bert_generation.py (+1 / -1)
src/transformers/models/big_bird/modeling_big_bird.py (+6 / -6)
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py (+3 / -3)
src/transformers/models/blenderbot/modeling_tf_blenderbot.py (+1 / -1)
src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py (+1 / -1)
src/transformers/models/canine/modeling_canine.py (+5 / -5)
src/transformers/models/convbert/modeling_convbert.py (+6 / -6)
src/transformers/models/convbert/modeling_tf_convbert.py (+6 / -6)
src/transformers/models/ctrl/modeling_ctrl.py (+3 / -3)
src/transformers/models/ctrl/modeling_tf_ctrl.py (+3 / -3)
src/transformers/models/deberta/modeling_deberta.py (+5 / -5)
src/transformers/models/deberta/modeling_tf_deberta.py (+5 / -5)
src/transformers/file_utils.py
...
...
@@ -791,10 +791,10 @@ def _prepare_output_docstrings(output_type, config_class):
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import torch

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
...
...
@@ -808,10 +808,10 @@ PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import torch

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
...
...
@@ -828,10 +828,10 @@ PT_QUESTION_ANSWERING_SAMPLE = r"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import torch

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
...
...
@@ -844,10 +844,10 @@ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
PT_MASKED_LM_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import torch

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")
...
...
@@ -861,10 +861,10 @@ PT_MASKED_LM_SAMPLE = r"""
PT_BASE_MODEL_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import torch

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
...
...
@@ -876,10 +876,10 @@ PT_BASE_MODEL_SAMPLE = r"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import torch

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
...
...
@@ -899,9 +899,9 @@ PT_CAUSAL_LM_SAMPLE = r"""
    Example::

        >>> import torch
-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
...
...
@@ -910,6 +910,79 @@ PT_CAUSAL_LM_SAMPLE = r"""
        >>> logits = outputs.logits
"""

+PT_SPEECH_BASE_MODEL_SAMPLE = r"""
+    Example::
+
+        >>> from transformers import {processor_class}, {model_class}
+        >>> from datasets import load_dataset
+
+        >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
+        >>> sampling_rate = dataset.features["audio"].sampling_rate
+
+        >>> processor = {processor_class}.from_pretrained('{checkpoint}')
+        >>> model = {model_class}.from_pretrained('{checkpoint}')
+
+        >>> # audio file is decoded on the fly
+        >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
+        >>> outputs = model(**inputs)
+
+        >>> last_hidden_states = outputs.last_hidden_state
+"""
+
+PT_SPEECH_CTC_SAMPLE = r"""
+    Example::
+
+        >>> from transformers import {processor_class}, {model_class}
+        >>> from datasets import load_dataset
+        >>> import torch
+
+        >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
+        >>> sampling_rate = dataset.features["audio"].sampling_rate
+
+        >>> processor = {processor_class}.from_pretrained('{checkpoint}')
+        >>> model = {model_class}.from_pretrained('{checkpoint}')
+
+        >>> # audio file is decoded on the fly
+        >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
+        >>> logits = model(**inputs).logits
+        >>> predicted_ids = torch.argmax(logits, dim=-1)
+
+        >>> # transcribe speech
+        >>> transcription = processor.batch_decode(predicted_ids)
+
+        >>> # compute loss
+        >>> with processor.as_target_processor():
+        ...     inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids
+
+        >>> loss = model(**inputs).loss
+"""
+
+PT_SPEECH_SEQ_CLASS_SAMPLE = r"""
+    Example::
+
+        >>> from transformers import {processor_class}, {model_class}
+        >>> from datasets import load_dataset
+        >>> import torch
+
+        >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
+        >>> sampling_rate = dataset.features["audio"].sampling_rate
+
+        >>> feature_extractor = {processor_class}.from_pretrained('{checkpoint}')
+        >>> model = {model_class}.from_pretrained('{checkpoint}')
+
+        >>> # audio file is decoded on the fly
+        >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt")
+        >>> logits = model(**inputs).logits
+        >>> predicted_class_ids = torch.argmax(logits, dim=-1)
+        >>> predicted_label = model.config.id2label[predicted_class_ids]
+
+        >>> # compute loss - target_label is e.g. "down"
+        >>> target_label = model.config.id2label[0]
+        >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]])
+        >>> loss = model(**inputs).loss
+"""
+
PT_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
...
...
@@ -918,16 +991,19 @@ PT_SAMPLE_DOCSTRINGS = {
    "MaskedLM": PT_MASKED_LM_SAMPLE,
    "LMHead": PT_CAUSAL_LM_SAMPLE,
    "BaseModel": PT_BASE_MODEL_SAMPLE,
+    "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
+    "CTC": PT_SPEECH_CTC_SAMPLE,
+    "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
}

TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import tensorflow as tf

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
...
...
@@ -942,10 +1018,10 @@ TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import tensorflow as tf

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
...
...
@@ -961,10 +1037,10 @@ TF_QUESTION_ANSWERING_SAMPLE = r"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import tensorflow as tf

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
...
...
@@ -978,10 +1054,10 @@ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
TF_MASKED_LM_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import tensorflow as tf

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf")
...
...
@@ -995,10 +1071,10 @@ TF_MASKED_LM_SAMPLE = r"""
TF_BASE_MODEL_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import tensorflow as tf

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
...
...
@@ -1010,10 +1086,10 @@ TF_BASE_MODEL_SAMPLE = r"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import tensorflow as tf

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
...
...
@@ -1031,10 +1107,10 @@ TF_MULTIPLE_CHOICE_SAMPLE = r"""
TF_CAUSAL_LM_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}
        >>> import tensorflow as tf

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
...
...
@@ -1056,9 +1132,9 @@ TF_SAMPLE_DOCSTRINGS = {
FLAX_TOKEN_CLASSIFICATION_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors='jax')
...
...
@@ -1070,9 +1146,9 @@ FLAX_TOKEN_CLASSIFICATION_SAMPLE = r"""
FLAX_QUESTION_ANSWERING_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
...
...
@@ -1086,9 +1162,9 @@ FLAX_QUESTION_ANSWERING_SAMPLE = r"""
FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors='jax')
...
...
@@ -1100,9 +1176,9 @@ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
FLAX_MASKED_LM_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors='jax')
...
...
@@ -1114,9 +1190,9 @@ FLAX_MASKED_LM_SAMPLE = r"""
FLAX_BASE_MODEL_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors='jax')
...
...
@@ -1128,9 +1204,9 @@ FLAX_BASE_MODEL_SAMPLE = r"""
FLAX_MULTIPLE_CHOICE_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
...
...
@@ -1146,9 +1222,9 @@ FLAX_MULTIPLE_CHOICE_SAMPLE = r"""
FLAX_CAUSAL_LM_SAMPLE = r"""
    Example::

-        >>> from transformers import {tokenizer_class}, {model_class}
+        >>> from transformers import {processor_class}, {model_class}

-        >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
+        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
...
...
@@ -1170,7 +1246,14 @@ FLAX_SAMPLE_DOCSTRINGS = {
def add_code_sample_docstrings(
-    *docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None, mask=None, model_cls=None
+    *docstr,
+    processor_class=None,
+    checkpoint=None,
+    output_type=None,
+    config_class=None,
+    mask=None,
+    model_cls=None,
+    modality=None
):
    def docstring_decorator(fn):
        # model_class defaults to function's class if not specified otherwise
...
...
@@ -1183,9 +1266,11 @@ def add_code_sample_docstrings(
        else:
            sample_docstrings = PT_SAMPLE_DOCSTRINGS

-        doc_kwargs = dict(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
+        doc_kwargs = dict(model_class=model_class, processor_class=processor_class, checkpoint=checkpoint)

-        if "SequenceClassification" in model_class:
+        if "SequenceClassification" in model_class and modality == "audio":
+            code_sample = sample_docstrings["AudioClassification"]
+        elif "SequenceClassification" in model_class:
            code_sample = sample_docstrings["SequenceClassification"]
        elif "QuestionAnswering" in model_class:
            code_sample = sample_docstrings["QuestionAnswering"]
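Note: the doc_kwargs dict built on the changed line above is what fills the {processor_class}, {model_class}, and {checkpoint} placeholders in the sample templates earlier in this file. A minimal, self-contained sketch of that substitution follows; the template text and the concrete class and checkpoint names are illustrative assumptions, not lines from this commit.

# Sketch of how a sample docstring template is filled in; names below are assumed for illustration.
sample_template = r'''
    Example::

        >>> from transformers import {processor_class}, {model_class}
        >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}')
        >>> model = {model_class}.from_pretrained('{checkpoint}')
'''
doc_kwargs = dict(model_class="BertModel", processor_class="BertTokenizer", checkpoint="bert-base-uncased")
print(sample_template.format(**doc_kwargs))  # prints the example with every placeholder resolved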
...
...
@@ -1198,6 +1283,10 @@ def add_code_sample_docstrings(
            code_sample = sample_docstrings["MaskedLM"]
        elif "LMHead" in model_class or "CausalLM" in model_class:
            code_sample = sample_docstrings["LMHead"]
+        elif "CTC" in model_class:
+            code_sample = sample_docstrings["CTC"]
+        elif "Model" in model_class and modality == "audio":
+            code_sample = sample_docstrings["SpeechBaseModel"]
        elif "Model" in model_class or "Encoder" in model_class:
            code_sample = sample_docstrings["BaseModel"]
        else:
...
...
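With the new branches above, add_code_sample_docstrings dispatches on both the model class name and the new modality argument. A condensed, runnable sketch of that selection order follows; pick_sample_key is a hypothetical helper written for illustration, not code from file_utils.py, and it covers only the audio-relevant branches.

# Hypothetical helper mirroring the branch order added in the hunks above (condensed).
def pick_sample_key(model_class: str, modality: str = None) -> str:
    if "SequenceClassification" in model_class and modality == "audio":
        return "AudioClassification"
    elif "SequenceClassification" in model_class:
        return "SequenceClassification"
    elif "CTC" in model_class:
        return "CTC"
    elif "Model" in model_class and modality == "audio":
        return "SpeechBaseModel"
    elif "Model" in model_class:
        return "BaseModel"
    else:
        return "unknown"

# Example dispatch results (class names are illustrative):
print(pick_sample_key("Wav2Vec2ForSequenceClassification", modality="audio"))  # AudioClassification
print(pick_sample_key("Wav2Vec2ForCTC", modality="audio"))                     # CTC
print(pick_sample_key("Wav2Vec2Model", modality="audio"))                      # SpeechBaseModel
print(pick_sample_key("BertModel"))                                            # BaseModel

The real decorator keeps its existing branches for question answering, masked LM, causal LM, and the other heads unchanged; they appear only as context lines in the diff above.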
src/transformers/modeling_flax_utils.py
...
...
@@ -528,7 +528,7 @@ def overwrite_call_docstring(model_class, docstring):
def append_call_sample_docstring(model_class, tokenizer_class, checkpoint, output_type, config_class, mask=None):
    model_class.__call__ = copy_func(model_class.__call__)
    model_class.__call__ = add_code_sample_docstrings(
-        tokenizer_class=tokenizer_class,
+        processor_class=tokenizer_class,
        checkpoint=checkpoint,
        output_type=output_type,
        config_class=config_class,
...
...
src/transformers/models/albert/modeling_albert.py
...
...
@@ -665,7 +665,7 @@ class AlbertModel(AlbertPreTrainedModel):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -916,7 +916,7 @@ class AlbertForMaskedLM(AlbertPreTrainedModel):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -995,7 +995,7 @@ class AlbertForSequenceClassification(AlbertPreTrainedModel):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1101,7 +1101,7 @@ class AlbertForTokenClassification(AlbertPreTrainedModel):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1191,7 +1191,7 @@ class AlbertForQuestionAnswering(AlbertPreTrainedModel):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1290,7 +1290,7 @@ class AlbertForMultipleChoice(AlbertPreTrainedModel):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/albert/modeling_tf_albert.py
...
...
@@ -783,7 +783,7 @@ class TFAlbertModel(TFAlbertPreTrainedModel):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1000,7 +1000,7 @@ class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss)
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1105,7 +1105,7 @@ class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClass
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1214,7 +1214,7 @@ class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificat
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1315,7 +1315,7 @@ class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringL
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1443,7 +1443,7 @@ class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/bart/modeling_bart.py
...
...
@@ -1130,7 +1130,7 @@ class BartModel(BartPretrainedModel):
    @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1400,7 +1400,7 @@ class BartForSequenceClassification(BartPretrainedModel):
    @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1512,7 +1512,7 @@ class BartForQuestionAnswering(BartPretrainedModel):
    @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/bart/modeling_tf_bart.py
...
...
@@ -1196,7 +1196,7 @@ class TFBartModel(TFBartPretrainedModel):
    @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSeq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/bert/modeling_bert.py
...
...
@@ -886,7 +886,7 @@ class BertModel(BertPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1302,7 +1302,7 @@ class BertForMaskedLM(BertPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1501,7 +1501,7 @@ class BertForSequenceClassification(BertPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1600,7 +1600,7 @@ class BertForMultipleChoice(BertPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1698,7 +1698,7 @@ class BertForTokenClassification(BertPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1788,7 +1788,7 @@ class BertForQuestionAnswering(BertPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/bert/modeling_tf_bert.py
...
...
@@ -1064,7 +1064,7 @@ class TFBertModel(TFBertPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1335,7 +1335,7 @@ class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1451,7 +1451,7 @@ class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss):
        }

    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFCausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1704,7 +1704,7 @@ class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassific
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1814,7 +1814,7 @@ class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss):
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1973,7 +1973,7 @@ class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationL
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2080,7 +2080,7 @@ class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/bert_generation/modeling_bert_generation.py
...
...
@@ -300,7 +300,7 @@ class BertGenerationEncoder(BertGenerationPreTrainedModel):
    @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/big_bird/modeling_big_bird.py
...
...
@@ -1974,7 +1974,7 @@ class BigBirdModel(BigBirdPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2380,7 +2380,7 @@ class BigBirdForMaskedLM(BigBirdPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2646,7 +2646,7 @@ class BigBirdForSequenceClassification(BigBirdPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2743,7 +2743,7 @@ class BigBirdForMultipleChoice(BigBirdPreTrainedModel):
        BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2838,7 +2838,7 @@ class BigBirdForTokenClassification(BigBirdPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2946,7 +2946,7 @@ class BigBirdForQuestionAnswering(BigBirdPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/bigbird-base-trivia-itc",
        output_type=BigBirdForQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
...
...
@@ -2338,7 +2338,7 @@ class BigBirdPegasusModel(BigBirdPegasusPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2611,7 +2611,7 @@ class BigBirdPegasusForSequenceClassification(BigBirdPegasusPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -2724,7 +2724,7 @@ class BigBirdPegasusForQuestionAnswering(BigBirdPegasusPreTrainedModel):
    @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/blenderbot/modeling_tf_blenderbot.py
...
...
@@ -1206,7 +1206,7 @@ class TFBlenderbotModel(TFBlenderbotPreTrainedModel):
    @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSeq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py
...
...
@@ -1194,7 +1194,7 @@ class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
    @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSeq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/canine/modeling_canine.py
...
...
@@ -1096,7 +1096,7 @@ class CanineModel(CaninePreTrainedModel):
    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CanineModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1277,7 +1277,7 @@ class CanineForSequenceClassification(CaninePreTrainedModel):
    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1373,7 +1373,7 @@ class CanineForMultipleChoice(CaninePreTrainedModel):
    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1465,7 +1465,7 @@ class CanineForTokenClassification(CaninePreTrainedModel):
    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1552,7 +1552,7 @@ class CanineForQuestionAnswering(CaninePreTrainedModel):
    @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/convbert/modeling_convbert.py
...
...
@@ -793,7 +793,7 @@ class ConvBertModel(ConvBertPreTrainedModel):
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -896,7 +896,7 @@ class ConvBertForMaskedLM(ConvBertPreTrainedModel):
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -999,7 +999,7 @@ class ConvBertForSequenceClassification(ConvBertPreTrainedModel):
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1096,7 +1096,7 @@ class ConvBertForMultipleChoice(ConvBertPreTrainedModel):
        CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1191,7 +1191,7 @@ class ConvBertForTokenClassification(ConvBertPreTrainedModel):
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1278,7 +1278,7 @@ class ConvBertForQuestionAnswering(ConvBertPreTrainedModel):
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/convbert/modeling_tf_convbert.py
...
...
@@ -754,7 +754,7 @@ class TFConvBertModel(TFConvBertPreTrainedModel):
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -886,7 +886,7 @@ class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingL
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1010,7 +1010,7 @@ class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceC
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1119,7 +1119,7 @@ class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLos
        CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1257,7 +1257,7 @@ class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassif
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1352,7 +1352,7 @@ class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnswer
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/ctrl/modeling_ctrl.py
...
...
@@ -355,7 +355,7 @@ class CTRLModel(CTRLPreTrainedModel):
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -516,7 +516,7 @@ class CTRLLMHeadModel(CTRLPreTrainedModel):
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -619,7 +619,7 @@ class CTRLForSequenceClassification(CTRLPreTrainedModel):
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/ctrl/modeling_tf_ctrl.py
...
...
@@ -543,7 +543,7 @@ class TFCTRLModel(TFCTRLPreTrainedModel):
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -671,7 +671,7 @@ class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFCausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -795,7 +795,7 @@ class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassific
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/deberta/modeling_deberta.py
...
...
@@ -866,7 +866,7 @@ class DebertaModel(DebertaPreTrainedModel):
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -972,7 +972,7 @@ class DebertaForMaskedLM(DebertaPreTrainedModel):
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1112,7 +1112,7 @@ class DebertaForSequenceClassification(DebertaPreTrainedModel):
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1207,7 +1207,7 @@ class DebertaForTokenClassification(DebertaPreTrainedModel):
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1294,7 +1294,7 @@ class DebertaForQuestionAnswering(DebertaPreTrainedModel):
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
src/transformers/models/deberta/modeling_tf_deberta.py
...
...
@@ -1101,7 +1101,7 @@ class TFDebertaModel(TFDebertaPreTrainedModel):
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1173,7 +1173,7 @@ class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLos
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1275,7 +1275,7 @@ class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceCla
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1372,7 +1372,7 @@ class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassific
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
...
...
@@ -1465,7 +1465,7 @@ class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnswerin
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
-        tokenizer_class=_TOKENIZER_FOR_DOC,
+        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
...
...