transformers / Commit 68b21b37 (Unverified)
Authored Feb 14, 2023 by Sylvain Gugger, committed by GitHub on Feb 14, 2023
Final cleanup of TOKENIZER_FOR_DOC (#21565)
Final cleanup of TOKENIZER_FOR_DOC: remove the last remaining _TOKENIZER_FOR_DOC constants and the deprecated processor_class=_TOKENIZER_FOR_DOC arguments passed to add_code_sample_docstrings.
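Across all three files the pattern is identical: the module-level _TOKENIZER_FOR_DOC constant is deleted, and the processor_class=_TOKENIZER_FOR_DOC argument is dropped from each add_code_sample_docstrings call, leaving checkpoint, output_type, and config_class. A minimal, self-contained sketch of the idea follows; this is NOT the transformers implementation, only an illustration under the assumption that a generated code sample can reference a generic Auto* class plus the checkpoint name, which makes a hard-coded tokenizer class redundant:

# Toy stand-in for add_code_sample_docstrings (hypothetical, for illustration):
# it builds a usage example from the checkpoint name alone, so no
# processor_class argument is needed. output_type/config_class are accepted
# only to mirror the call sites in this diff; the real decorator also uses
# them for return-type documentation.
_SAMPLE = """
Example:

    >>> from transformers import AutoProcessor, {model_class}

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")
"""


def add_code_sample(checkpoint, output_type=None, config_class=None):
    def decorator(fn):
        # Derive the class name from the decorated method's qualified name.
        model_class = fn.__qualname__.split(".")[0]
        fn.__doc__ = (fn.__doc__ or "") + _SAMPLE.format(
            model_class=model_class, checkpoint=checkpoint
        )
        return fn

    return decorator


class TFToyModel:
    @add_code_sample(
        checkpoint="facebook/s2t-small-librispeech-asr",
        output_type="TFSeq2SeqModelOutput",
        config_class="Speech2TextConfig",
    )
    def call(self, inputs):
        """Forward pass."""
        return inputs


print(TFToyModel.call.__doc__)  # ends with the generated, checkpoint-driven example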
Parent: c6f163c7
Changes (3 changed files with 0 additions and 22 deletions):

src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py (+0 -2)
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py (+0 -9)
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py (+0 -11)
src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@@ -50,7 +50,6 @@ from .configuration_speech_to_text import Speech2TextConfig
 logger = logging.get_logger(__name__)

 _CONFIG_FOR_DOC = "Speech2TextConfig"
-_TOKENIZER_FOR_DOC = "Speech2TextTokenizer"
 _CHECKPOINT_FOR_DOC = "facebook/s2t-small-librispeech-asr"
@@ -1257,7 +1256,6 @@ class TFSpeech2TextModel(TFSpeech2TextPreTrainedModel):
     @unpack_inputs
     @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSeq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
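After this hunk, the doc sample for TFSpeech2TextModel is driven by _CHECKPOINT_FOR_DOC alone. A hedged sketch of what loading that checkpoint generically looks like; this mirrors, but is not necessarily identical to, the sample the decorator generates, and it requires network access plus TensorFlow installed:

# Hedged sketch: load the preprocessor and model from the checkpoint named in
# this file, with no tokenizer class hard-coded. AutoProcessor resolves to the
# matching processor class for this checkpoint.
from transformers import AutoProcessor, TFSpeech2TextModel

checkpoint = "facebook/s2t-small-librispeech-asr"  # _CHECKPOINT_FOR_DOC above
processor = AutoProcessor.from_pretrained(checkpoint)
model = TFSpeech2TextModel.from_pretrained(checkpoint)  # may need from_pt=True if TF weights are absent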
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py
@@ -62,7 +62,6 @@ logger = logging.get_logger(__name__)
 _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
 _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
-_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"

 TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [
     "{{cookiecutter.checkpoint_identifier}}",
@@ -941,7 +940,6 @@ class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_mod
     @unpack_inputs
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFBaseModelOutputWithPastAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,
@@ -1043,7 +1041,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForMaskedLM(TF{{cookiecutter.camelca
     @unpack_inputs
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1135,7 +1132,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForCausalLM(TF{{cookiecutter.camelca
     @unpack_inputs
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFCausalLMOutputWithCrossAttentions,
         config_class=_CONFIG_FOR_DOC,
@@ -1281,7 +1277,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification(TF{{cookie
     @unpack_inputs
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1370,7 +1365,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice(TF{{cookiecutter.c
     @unpack_inputs
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1496,7 +1490,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForTokenClassification(TF{{cookiecut
     @unpack_inputs
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1576,7 +1569,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(TF{{cookiecutte
     @unpack_inputs
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -2766,7 +2758,6 @@ class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_mod
     @unpack_inputs
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TFSeq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py
@@ -56,7 +56,6 @@ logger = logging.get_logger(__name__)
 _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
 _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
-_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"

 {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [
     "{{cookiecutter.checkpoint_identifier}}",
@@ -793,7 +792,6 @@ class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelna
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=BaseModelOutputWithPastAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,
@@ -952,7 +950,6 @@ class {{cookiecutter.camelcase_modelname}}ForMaskedLM({{cookiecutter.camelcase_m
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1224,7 +1221,6 @@ class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutt
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1317,7 +1313,6 @@ class {{cookiecutter.camelcase_modelname}}ForMultipleChoice({{cookiecutter.camel
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1408,7 +1403,6 @@ class {{cookiecutter.camelcase_modelname}}ForTokenClassification({{cookiecutter.
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1487,7 +1481,6 @@ class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.ca
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -1601,7 +1594,6 @@ logger = logging.get_logger(__name__)
 _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
 _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
-_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"

 {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [
@@ -2654,7 +2646,6 @@ class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelna
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqModelOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -2934,7 +2925,6 @@ class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutt
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
@@ -3051,7 +3041,6 @@ class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.ca
     @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
-        processor_class=_TOKENIZER_FOR_DOC,
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=Seq2SeqQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,