Unverified Commit 242cc6e2 authored by Stefan Schweter, committed by GitHub

Documentation: RemBERT fixes (#17641)

* rembert: fix python codeblock

* rembert: use correct google/rembert checkpoint name in documentation

* rembert: use correct google/rembert checkpoint name in TF documentation
parent b76290f4
@@ -21,7 +21,7 @@ from ...utils import logging

 logger = logging.get_logger(__name__)

 REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-    "rembert": "https://huggingface.co/google/rembert/resolve/main/config.json",
+    "google/rembert": "https://huggingface.co/google/rembert/resolve/main/config.json",
     # See all RemBERT models at https://huggingface.co/models?filter=rembert
 }
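For reference, the corrected key in REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP now matches the full Hub identifier of the checkpoint. A minimal sketch, not part of this commit, of what that identifier is used for:

```python
# Minimal sketch, not part of this commit: "google/rembert" is the identifier
# users pass to from_pretrained() to fetch the configuration from the Hub.
from transformers import RemBertConfig

config = RemBertConfig.from_pretrained("google/rembert")
print(config.model_type)  # "rembert"
```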
@@ -80,16 +80,17 @@ class RemBertConfig(PretrainedConfig):
     Example:

     ```python
-
-    ```
-
-    >>> from transformers import RemBertModel, RemBertConfig >>> # Initializing a RemBERT rembert style
-    configuration >>> configuration = RemBertConfig()
-
-    >>> # Initializing a model from the rembert style configuration >>> model = RemBertModel(configuration)
-
-    >>> # Accessing the model configuration >>> configuration = model.config
-    """
+    >>> from transformers import RemBertModel, RemBertConfig
+
+    >>> # Initializing a RemBERT rembert style configuration
+    >>> configuration = RemBertConfig()
+
+    >>> # Initializing a model from the rembert style configuration
+    >>> model = RemBertModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""

     model_type = "rembert"

     def __init__(
...
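Outside the docstring, the repaired example is an ordinary script; a standalone version of the same code, shown here only for readability (the model is instantiated with random weights, nothing is downloaded):

```python
from transformers import RemBertConfig, RemBertModel

# Initializing a RemBERT rembert style configuration
configuration = RemBertConfig()

# Initializing a model from the rembert style configuration
# (random weights; no checkpoint is fetched here)
model = RemBertModel(configuration)

# Accessing the model configuration
configuration = model.config
```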
@@ -786,7 +786,7 @@ class RemBertModel(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=BaseModelOutputWithPastAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -939,7 +939,7 @@ class RemBertForMaskedLM(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=MaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1184,7 +1184,7 @@ class RemBertForSequenceClassification(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=SequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1281,7 +1281,7 @@ class RemBertForMultipleChoice(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=MultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1374,7 +1374,7 @@ class RemBertForTokenClassification(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1453,7 +1453,7 @@ class RemBertForQuestionAnswering(RemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=QuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
     )
...
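The checkpoint string passed to add_code_sample_docstrings is what the auto-generated usage examples load, so these hunks make the rendered PyTorch docstrings point at a resolvable Hub repository. A hedged sketch of the kind of call this corresponds to (the example sentence and the post-processing below are illustrative, not taken from the generated docs):

```python
# Sketch only: load RemBERT by the corrected checkpoint name and fill a mask.
# The classes exist in transformers; the sentence is made up for illustration.
import torch
from transformers import RemBertForMaskedLM, RemBertTokenizerFast

tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
model = RemBertForMaskedLM.from_pretrained("google/rembert")

inputs = tokenizer("Paris is the capital of [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring token at the [MASK] position.
mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```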
@@ -938,7 +938,7 @@ class TFRemBertModel(TFRemBertPreTrainedModel):
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1041,7 +1041,7 @@ class TFRemBertForMaskedLM(TFRemBertPreTrainedModel, TFMaskedLanguageModelingLos
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TFMaskedLMOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1131,7 +1131,7 @@ class TFRemBertForCausalLM(TFRemBertPreTrainedModel, TFCausalLanguageModelingLos
     @unpack_inputs
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TFCausalLMOutputWithCrossAttentions,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1262,7 +1262,7 @@ class TFRemBertForSequenceClassification(TFRemBertPreTrainedModel, TFSequenceCla
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TFSequenceClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1352,7 +1352,7 @@ class TFRemBertForMultipleChoice(TFRemBertPreTrainedModel, TFMultipleChoiceLoss)
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TFMultipleChoiceModelOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1471,7 +1471,7 @@ class TFRemBertForTokenClassification(TFRemBertPreTrainedModel, TFTokenClassific
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TFTokenClassifierOutput,
         config_class=_CONFIG_FOR_DOC,
     )
@@ -1550,7 +1550,7 @@ class TFRemBertForQuestionAnswering(TFRemBertPreTrainedModel, TFQuestionAnswerin
     @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
         processor_class=_TOKENIZER_FOR_DOC,
-        checkpoint="rembert",
+        checkpoint="google/rembert",
         output_type=TFQuestionAnsweringModelOutput,
         config_class=_CONFIG_FOR_DOC,
     )
...
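The TensorFlow classes take the same checkpoint string. A minimal sketch, again illustrative rather than copied from the generated docs, of loading the TF model with the corrected name:

```python
# Sketch only: the TF classes accept the same "google/rembert" identifier.
from transformers import RemBertTokenizerFast, TFRemBertModel

tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
model = TFRemBertModel.from_pretrained("google/rembert")

inputs = tokenizer("RemBERT decouples input and output embeddings.", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
```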