Unverified Commit 42049b8e authored by Stas Bekman, committed by GitHub

use the correct add_start_docstrings (#7174)

parent fdaf8ab3
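
Note: both decorators live in file_utils and both prepend shared docstring text to the decorated callable. The _to_callable variant additionally injects a model-specific "forward method" preamble derived from the owning class name, which is misleading on a tokenizer helper like prepare_seq2seq_batch; hence the swap in the diffs below. A minimal sketch of the assumed shapes of the two decorators, modeled on transformers.file_utils around the time of this commit (the real preamble wording differs):

def add_start_docstrings(*docstr):
    # Prepend the shared docstring fragments to the callable's own docstring.
    def docstring_decorator(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return docstring_decorator

def add_start_docstrings_to_callable(*docstr):
    # Same idea, but also injects an intro that presents the callable as a
    # model forward method, naming the owning class taken from __qualname__.
    # That intro is wrong for a tokenizer method, which is why this commit
    # switches to the plain decorator above.
    def docstring_decorator(fn):
        class_name = fn.__qualname__.split(".")[0]
        intro = "    The {} forward method, overrides the __call__ special method.\n\n".format(class_name)
        fn.__doc__ = intro + "".join(docstr) + (fn.__doc__ or "")
        return fn
    return docstring_decorator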
@@ -7,7 +7,7 @@ from typing import Dict, List, Optional, Tuple, Union
 
 import sentencepiece
 
-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils import BatchEncoding, PreTrainedTokenizer
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
@@ -125,7 +125,7 @@ class MarianTokenizer(PreTrainedTokenizer):
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return token_ids_0 + token_ids_1 + [self.eos_token_id]
 
-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
...
@@ -15,7 +15,7 @@
 from typing import List, Optional
 
-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils import BatchEncoding
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
 from .tokenization_xlm_roberta import XLMRobertaTokenizer
@@ -156,7 +156,7 @@ class MBartTokenizer(XLMRobertaTokenizer):
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
 
-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
...
@@ -16,7 +16,7 @@ from typing import Dict, List, Optional
 
 from transformers.tokenization_reformer import ReformerTokenizer
 
-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding
@@ -104,7 +104,7 @@ class PegasusTokenizer(ReformerTokenizer):
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return token_ids_0 + token_ids_1 + [self.eos_token_id]
 
-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
...
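
A quick sanity check of the effect (hypothetical snippet, assuming a transformers build that includes this commit):

from transformers import MarianTokenizer

# The method's docstring should now begin with the shared
# PREPARE_SEQ2SEQ_BATCH_DOCSTRING text, without any spurious model
# "forward method" preamble.
print(MarianTokenizer.prepare_seq2seq_batch.__doc__[:300])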